hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
63864bd0c8cd0935d04afd12f3430b88b366b42e | 8,792 | py | Python | sources/rnt/mediane/process.py | bryan-brancotte/rank-aggregation-with-ties | 15fffb0b1bee3d6cef7090486a7c910e5f51195d | [
"Apache-2.0"
] | null | null | null | sources/rnt/mediane/process.py | bryan-brancotte/rank-aggregation-with-ties | 15fffb0b1bee3d6cef7090486a7c910e5f51195d | [
"Apache-2.0"
] | 11 | 2018-04-04T08:24:30.000Z | 2021-03-19T21:45:04.000Z | sources/rnt/mediane/process.py | bryan-brancotte/rank-aggregation-with-ties | 15fffb0b1bee3d6cef7090486a7c910e5f51195d | [
"Apache-2.0"
] | 1 | 2018-10-25T09:13:41.000Z | 2018-10-25T09:13:41.000Z | from django.utils import timezone
from django.utils.translation import ugettext
from mediane.algorithms.enumeration import get_name_from
from mediane.algorithms.lri.BioConsert import BioConsert
from mediane.algorithms.lri.ExactAlgorithm import ExactAlgorithm
from mediane.algorithms.misc.borda_count import BordaCount
from mediane.distances.KendallTauGeneralizedNlogN import KendallTauGeneralizedNlogN
from mediane.distances.enumeration import GENERALIZED_KENDALL_TAU_DISTANCE_WITH_UNIFICATION
from mediane.median_ranking_tools import parse_ranking_with_ties_of_str, dump_ranking_with_ties_to_str
from mediane.normalizations.enumeration import NONE, UNIFICATION, PROJECTION
from mediane.normalizations.unification import Unification
from mediane.normalizations.projection import Projection
MIN_MEASURE_DURATION = 3
def execute_median_rankings_computation_from_rankings(
        rankings,
        algorithm,
        normalization,
        distance,
        precise_time_measurement,
        dataset=None,
        algorithms=None,
):
    """Compute the consensus (median) ranking of ``rankings`` and return a
    result dict (dataset info, consensus, distance to the input set, wall
    duration, algorithm info).

    :param rankings: list of rankings (list of buckets) to aggregate.
    :param algorithm: algorithm instance exposing ``compute_median_rankings``.
    :param normalization: normalization object; matched by its str() name.
    :param distance: distance object; ``distance.id_order`` selects the value
        returned by the Kendall-tau evaluator.
    :param precise_time_measurement: when True, the computation is repeated
        until at least MIN_MEASURE_DURATION seconds elapse, to average out
        timer noise for fast algorithms.
    :param dataset: optional source dataset used only for labeling the result.
    :param algorithms: when given, fan out and return one result dict per
        algorithm (as a list) instead of a single dict.
    """
    if str(normalization) == "Unification":
        rankings_real = Unification.rankings_to_rankings(rankings)
    elif str(normalization) == "Projection":
        rankings_real = Projection.rankings_to_rankings(rankings)
    else:
        rankings_real = rankings
    if algorithms:
        # Fan-out: one recursive call per algorithm.
        # NOTE(review): the recursion passes the already-normalized
        # ``rankings_real`` together with the same ``normalization``, so the
        # normalization is applied a second time in the nested call — confirm
        # this is idempotent for Unification/Projection.
        return [execute_median_rankings_computation_from_rankings(
            rankings=rankings_real,
            algorithm=a,
            normalization=normalization,
            distance=distance,
            precise_time_measurement=precise_time_measurement,
            dataset=dataset,
        ) for a in algorithms]
    iteration = 1
    start_timezone = timezone.now()
    c = algorithm.compute_median_rankings(rankings=rankings_real, distance=distance)
    duration = (timezone.now() - start_timezone).total_seconds()
    # Benchmark loop: scale the repetition count so total measured time
    # reaches MIN_MEASURE_DURATION (the 1.1 factor overshoots slightly).
    while precise_time_measurement and duration < MIN_MEASURE_DURATION:
        # print(iteration, duration)
        iteration = int((iteration / duration) * MIN_MEASURE_DURATION * 1.1)
        rang_iter = range(2, iteration)
        start_timezone = timezone.now()
        for k in rang_iter:
            algorithm.compute_median_rankings(rankings=rankings_real, distance=distance)
        duration = (timezone.now() - start_timezone).total_seconds()
    return dict(
        dataset=dict(
            id=-1,
            name=ugettext('typed'),
        ) if dataset is None else
        dict(
            id=dataset.id,
            name=str(dataset),
        ),
        consensus=c,
        # Distance of the first consensus to the ORIGINAL (un-normalized)
        # rankings; id_order picks the relevant component of the result.
        distance=KendallTauGeneralizedNlogN(distance).get_distance_to_a_set_of_rankings(
            c[0],
            rankings=rankings,
        )[distance.id_order],
        # Average duration per iteration, rounded to microseconds and
        # expressed in seconds.
        duration=(int(duration / iteration * 1000.0 * 1000.0 * 1000.0)) / 1000.0 / 1000.0,
        algo=dict(
            id=algorithm.get_full_name(),
            name=str(get_name_from(algorithm.get_full_name())),
        ),
    )
def execute_median_rankings_computation_from_datasets(
        datasets,
        algorithm,
        normalization,
        distance,
        precise_time_measurement,
        algorithms=None,
):
    """Compute consensus rankings for every (dataset, algorithm) pair.

    :param datasets: iterable of dataset objects exposing ``rankings`` and
        ``complete``.
    :param algorithm: a single algorithm instance, or None; appended to
        ``algorithms`` when given.
    :param normalization: normalization object; applied only to incomplete
        datasets (complete ones need no unification/projection).
    :param distance: distance object forwarded to the computation.
    :param precise_time_measurement: forwarded benchmark flag.
    :param algorithms: optional list of algorithm instances.
    :return: flat list of result dicts, one per (dataset, algorithm) pair.
    """
    submission_results = []
    # Copy the incoming list: the previous implementation appended
    # ``algorithm`` to the caller's own list object, mutating the argument
    # in place across calls.
    algorithms = list(algorithms) if algorithms else []
    if algorithm is not None:
        algorithms.append(algorithm)
    for d in datasets:
        if not d.complete:
            if str(normalization) == "Unification":
                rankings_real = Unification.rankings_to_rankings(d.rankings)
            elif str(normalization) == "Projection":
                rankings_real = Projection.rankings_to_rankings(d.rankings)
            else:
                rankings_real = d.rankings
        else:
            # A complete dataset is already normalized by definition.
            rankings_real = d.rankings
        for a in algorithms:
            submission_results.append(
                execute_median_rankings_computation_from_rankings(
                    rankings=rankings_real,
                    algorithm=a,
                    normalization=normalization,
                    distance=distance,
                    precise_time_measurement=precise_time_measurement,
                    dataset=d,
                )
            )
    return submission_results
def create_computation_job(
        datasets,
        normalization,
        distance,
        precise_time_measurement,
        algorithms,
        owner,
):
    """Persist a new computation Job plus one pending Result row per
    (dataset, algorithm) pair, then return the Job.

    Each Result is immediately marked as "todo" so workers can pick it up;
    the Job's task counter is refreshed once all Results exist.
    """
    from mediane import models
    new_job = models.Job.objects.create(
        owner=owner,
        dist=distance,
        norm=normalization,
        creation=timezone.now(),
        bench=precise_time_measurement,
        identifier=None,
    )
    for current_dataset in datasets:
        for current_algo in algorithms:
            pending_result = models.Result.objects.create(
                algo=current_algo,
                dataset=current_dataset,
                job=new_job,
            )
            pending_result.mark_as_todo()
    new_job.update_task_count()
    return new_job
def execute_median_rankings_computation_of_result(
        result,
):
    """Run the computation described by a Result row and store its outcome.

    Reads the dataset/algorithm/job settings attached to ``result``, performs
    the consensus computation, then saves the serialized consensuses, the
    distance value and the duration back onto the Result.
    """
    outcome = execute_median_rankings_computation_from_rankings(
        rankings=result.dataset.rankings,
        algorithm=result.algo.get_instance(),
        normalization=result.job.norm,
        distance=result.job.dist,
        precise_time_measurement=result.job.bench,
        dataset=result.dataset,
    )
    serialized = [dump_ranking_with_ties_to_str(one_consensus)
                  for one_consensus in outcome["consensus"]]
    result.consensuses = '\n'.join(serialized)
    result.distance_value = outcome["distance"]
    result.duration = outcome["duration"]
    result.save()
def cleanup_dataset(rankings_as_one_str):
    """Normalize a raw multi-ranking string for parsing.

    Strips carriage returns, backslash line-continuations and empty
    rankings (a line or trailing segment ending in ':').

    :param rankings_as_one_str: raw user input, or None.
    :return: cleaned string; "" when the input is None or fully stripped.
    """
    if rankings_as_one_str is None:
        return ""
    rankings_as_one_str = rankings_as_one_str.replace("\r", "")
    rankings_as_one_str = rankings_as_one_str.replace("\\\n", "")
    rankings_as_one_str = rankings_as_one_str.replace(":\n", "")
    # Use endswith instead of [-1]: the old unconditional index raised
    # IndexError when the replacements above left an empty string
    # (e.g. input consisting only of "\r" characters).
    if rankings_as_one_str.endswith(':'):
        rankings_as_one_str = rankings_as_one_str[:-1]
    return rankings_as_one_str
def evaluate_dataset_and_provide_stats(rankings_str):
    """Parse a list of ranking strings and gather validity statistics.

    :param rankings_str: iterable of rankings, one string per ranking.
    :return: dict with keys:
        - "complete": True when every ranking covers the same element set
        - "n": number of distinct elements over all rankings
        - "m": number of rankings
        - "invalid": True when at least one ranking failed to parse or
          contained a duplicated element
        - "invalid_rankings_id": {index: error message/args}
        - "rankings": the parsed rankings (invalid ones become [])
    """
    evaluation = {}
    elements = None
    rankings = []
    complete = True
    invalid_rankings = {}
    cpt = -1
    for ranking_str in rankings_str:
        cpt += 1
        try:
            ranking = parse_ranking_with_ties_of_str(ranking_str)
        except ValueError as e:
            # Keep the parser's message; unwrap single-argument tuples.
            invalid_rankings[cpt] = e.args if len(e.args) > 1 else e.args[0]
            ranking = []
        rankings.append(ranking)
        ranking_elements = set()
        for bucket in ranking:
            for element in bucket:
                if element in ranking_elements:
                    invalid_rankings[cpt] = "Duplicated element '%s'" % element
                ranking_elements.add(element)
        if elements is None:
            # First ranking defines the reference element set.
            elements = ranking_elements
        if ranking_elements != elements:
            complete = False
        elements.update(ranking_elements)
    evaluation["complete"] = complete
    # Fix: with an empty input, ``elements`` stayed None and len(None)
    # raised a TypeError in the previous implementation.
    evaluation["n"] = len(elements) if elements is not None else 0
    evaluation["m"] = len(rankings)
    evaluation["invalid"] = len(invalid_rankings) > 0
    evaluation["invalid_rankings_id"] = invalid_rankings
    evaluation["rankings"] = rankings
    return evaluation
def compute_consensus_settings_based_on_datasets(
        n,
        m,
        complete,
        rankings,
        user,
        dbdatasets=None,
        algos=None,
):
    """Pick default consensus-computation settings for the given workload.

    :param n: number of distinct elements (drives algorithm choice).
    :param m: number of rankings (currently unused in the heuristics below).
    :param complete: whether every ranking covers all elements (only used by
        the commented-out normalization heuristic).
    :param rankings: the rankings themselves (currently unused here).
    :param user: the user for which we are find the best settings, should be used to
    not select an algorithm/distance/norm that is not visible by the user
    :param dbdatasets: datasets selected in the UI (size drives batch limits).
    :param algos: algorithms selected in the UI (size drives batch limits).
    :return: dict with keys "algo", "dist", "norm", "auto_compute", "bench",
        "extended_analysis" (pk values for the first three).
    """
    dbdatasets = [] if dbdatasets is None else dbdatasets
    algos = [] if algos is None else algos
    from mediane.models import Distance, Normalization, Algorithm
    consensus_settings = {}
    # Defaults: BioConsert heuristic, generalized Kendall-tau w/ unification,
    # no normalization.
    consensus_settings["algo"] = Algorithm.objects.get(key_name=str(BioConsert().get_full_name())).pk
    consensus_settings["dist"] = Distance.objects.get(key_name=GENERALIZED_KENDALL_TAU_DISTANCE_WITH_UNIFICATION).pk
    # consensus_settings["norm"] = Normalization.objects.get(key_name=NONE if complete else UNIFICATION).pk
    consensus_settings["norm"] = Normalization.objects.get(key_name=NONE).pk
    # Small instances can afford the exact algorithm (when its backend is
    # available); large instances or large batch jobs fall back to BordaCount.
    if n < 70 and ExactAlgorithm().can_be_executed():
        consensus_settings["algo"] = Algorithm.objects.get(key_name=str(ExactAlgorithm().get_full_name())).pk
    elif n > 100 or len(dbdatasets) * len(algos) > 20:
        consensus_settings["algo"] = Algorithm.objects.get(key_name=str(BordaCount().get_full_name())).pk
    # consensus_settings["auto_compute"] = n < 50 and len(dbdatasets) * len(algos) < 50
    consensus_settings["auto_compute"] = False
    consensus_settings["bench"] = False
    # Big batches are routed to the asynchronous/extended analysis path.
    consensus_settings["extended_analysis"] = len(dbdatasets) * len(algos) > 50
    # print(consensus_settings)
    return consensus_settings
| 35.595142 | 116 | 0.66242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 805 | 0.091561 |
6386ae51574c0702fa4ae47e2c3e29449d380984 | 71 | py | Python | utils_demo/percentage_format.py | IBM/nesa-demo | 4e87217f44ff66414f78df6962ee8633d89f0cf5 | [
"MIT"
] | 2 | 2021-12-16T13:16:56.000Z | 2022-01-19T14:23:18.000Z | utils_demo/percentage_format.py | SocioProphet/nesa-demo | 4e87217f44ff66414f78df6962ee8633d89f0cf5 | [
"MIT"
] | null | null | null | utils_demo/percentage_format.py | SocioProphet/nesa-demo | 4e87217f44ff66414f78df6962ee8633d89f0cf5 | [
"MIT"
def percentage_format(x: float) -> str:
    """Render the ratio *x* as a percentage with one decimal, e.g. 0.5 -> '50.0%'."""
    return "{:.1f}%".format(x * 100)
| 23.666667 | 39 | 0.591549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.267606 |
6389dbaa2631db7b30ed8767e0e5b57b31566133 | 399 | py | Python | restaurantapp/mainapp/migrations/0003_auto_20200508_1206.py | ShubhamJain0/ShubhamJain0.github.io | bf73d2a8f55c3362908c8102d1788a34627dba44 | [
"MIT"
] | null | null | null | restaurantapp/mainapp/migrations/0003_auto_20200508_1206.py | ShubhamJain0/ShubhamJain0.github.io | bf73d2a8f55c3362908c8102d1788a34627dba44 | [
"MIT"
] | 4 | 2021-04-08T21:52:50.000Z | 2022-02-10T09:29:03.000Z | restaurantapp/mainapp/migrations/0003_auto_20200508_1206.py | ShubhamJain0/Restaurant-App | bf73d2a8f55c3362908c8102d1788a34627dba44 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.2 on 2020-05-08 12:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``YourOrder.phone`` a nullable
    10-character CharField."""

    dependencies = [
        ('mainapp', '0002_auto_20200508_1115'),
    ]

    operations = [
        migrations.AlterField(
            model_name='yourorder',
            name='phone',
            field=models.CharField(max_length=10, null=True),
        ),
    ]
| 21 | 61 | 0.60401 | 306 | 0.766917 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.24812 |
6389dc63d6c399ed10f73f80566508686888935c | 82 | py | Python | pybomberman/__init__.py | pybomberman/pybomberman | 8c7582ec52bf0dd1d77a3e98f5867ffa97233653 | [
"MIT"
] | 2 | 2021-03-29T08:44:54.000Z | 2021-05-03T23:34:06.000Z | pybomberman/__init__.py | pybomberman/pybomberman | 8c7582ec52bf0dd1d77a3e98f5867ffa97233653 | [
"MIT"
] | null | null | null | pybomberman/__init__.py | pybomberman/pybomberman | 8c7582ec52bf0dd1d77a3e98f5867ffa97233653 | [
"MIT"
] | null | null | null | from .map import Map
# Placeholder package body: this banner is printed as an import-time side
# effect every time the package is imported.
print("Soon... https://github.com/pybomberman/pybomberman")
| 20.5 | 59 | 0.743902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.634146 |
638b620923dedf797dae35ba43746969775844b6 | 3,935 | py | Python | cd2h_repo_project/modules/doi/schemas.py | galterlibrary/InvenioRDM-at-NU | 5aff6ac7c428c9a61bdf221627bfc05f2280d1a3 | [
"MIT"
] | 6 | 2019-09-02T00:01:50.000Z | 2021-11-04T08:23:40.000Z | cd2h_repo_project/modules/doi/schemas.py | galterlibrary/InvenioRDM-at-NU | 5aff6ac7c428c9a61bdf221627bfc05f2280d1a3 | [
"MIT"
] | 72 | 2019-09-04T18:52:35.000Z | 2020-07-21T19:58:15.000Z | cd2h_repo_project/modules/doi/schemas.py | galterlibrary/InvenioRDM-at-NU | 5aff6ac7c428c9a61bdf221627bfc05f2280d1a3 | [
"MIT"
] | null | null | null | """JSON Schemas."""
import csv
from collections import defaultdict
from datetime import date
from os.path import dirname, join, realpath
from flask import current_app
from marshmallow import Schema, fields
from cd2h_repo_project.modules.records.resource_type import ResourceType
class DataCiteResourceTypeMap(object):
    """Mapping from (group, name) resource-type pairs to DataCite terms.

    The mapping is loaded from the bundled CSV file
    ``records/data/resource_type_mapping.csv``.

    TODO: If we extract this module out, make this class a configuration
    setting.
    """

    def __init__(self):
        """Load the CSV mapping into ``self.map``."""
        self.filename = join(
            dirname(dirname(realpath(__file__))),
            'records', 'data', 'resource_type_mapping.csv'
        )
        mapping = {}
        with open(self.filename) as csv_file:
            for row in csv.DictReader(csv_file):
                csv_key = (row['Group'].lower(), row['Name'].lower())
                mapping[csv_key] = row['DataCite'].strip()
        self.map = mapping

    def get(self, key, default=None):
        """Return the DataCite value mapped to ``key``.

        ``key`` is (<general resource type>, <specific resource type>);
        ``default`` is returned for unknown keys.
        """
        return self.map.get(key, default)
class DataCiteResourceTypeSchemaV4(Schema):
    """ResourceType schema.

    Serializes a record's resource_type dict (with keys 'general' and
    'specific') into DataCite's resourceType node.
    """

    resourceTypeGeneral = fields.Method('get_general_resource_type')
    resourceType = fields.Method('get_specific_resource_type')

    def get_general_resource_type(self, resource_type):
        """Return DataCite's controlled vocabulary General Resource Type."""
        resource_type_obj = ResourceType.get(
            resource_type['general'], resource_type['specific']
        )
        # Map the project's ResourceType onto the DataCite vocabulary via
        # the CSV-backed mapping defined above.
        return resource_type_obj.map(DataCiteResourceTypeMap())

    def get_specific_resource_type(self, resource_type):
        """Return title-ized Specific Resource Type."""
        return resource_type['specific'].title()
class DataCiteTitleSchemaV4(Schema):
    """Title schema: serializes the record's title into a DataCite title node."""

    title = fields.Str()
class DataCiteCreatorSchemaV4(Schema):
    """Creator schema.

    Each of these fields are inside the `creator` node. Source attributes
    come from the record's author entries.
    """

    creatorName = fields.Str(attribute='full_name')
    # TODO (optional): sub creatorName: nameType
    givenName = fields.Str(attribute='first_name')
    familyName = fields.Str(attribute='last_name')
    # TODO (optional):
    #   nameIdentifier
    #       nameIdentifierScheme
    #       schemeURI
    #   affiliation
class DataCiteSchemaV4(Schema):
    """Schema for DataCite Metadata.

    For now, only the minimum required fields are implemented. In the future,
    we may want to include optional fields as well.

    Fields and subfields are based on
    schema.datacite.org/meta/kernel-4.1/doc/DataCite-MetadataKernel_v4.1.pdf
    """

    identifier = fields.Method('get_identifier', dump_only=True)
    # NOTE: This auto-magically serializes the `creators` and `creator` nodes.
    creators = fields.List(
        fields.Nested(DataCiteCreatorSchemaV4),
        attribute='metadata.authors',
        dump_only=True)
    titles = fields.List(
        fields.Nested(DataCiteTitleSchemaV4),
        attribute='metadata',
        dump_only=True)
    publisher = fields.Method('get_publisher', dump_only=True)
    publicationYear = fields.Method('get_year', dump_only=True)
    resourceType = fields.Nested(
        DataCiteResourceTypeSchemaV4,
        attribute='metadata.resource_type',
        dump_only=True)

    def get_identifier(self, data):
        """Get record main identifier."""
        return {
            # If no DOI, 'DUMMY' value is used and will be ignored by DataCite
            'identifier': data.get('metadata', {}).get('doi') or 'DUMMY',
            'identifierType': 'DOI'
        }

    def get_publisher(self, data):
        """Extract publisher (configured application-wide)."""
        return current_app.config['DOI_PUBLISHER']

    def get_year(self, data):
        """Extract year.

        Current year for now.
        TODO: Revisit when dealing with embargo.
        """
        return date.today().year
| 30.503876 | 78 | 0.653621 | 3,639 | 0.924778 | 0 | 0 | 0 | 0 | 0 | 0 | 1,591 | 0.40432 |
638ca1bd53358131bd94d7e2bbba9486f9304c18 | 1,068 | py | Python | python/dazl/model/__init__.py | DACH-NY/dazl-client | 56c8b1be047415b2bcb35b6558de4a780a402458 | [
"Apache-2.0"
] | null | null | null | python/dazl/model/__init__.py | DACH-NY/dazl-client | 56c8b1be047415b2bcb35b6558de4a780a402458 | [
"Apache-2.0"
] | null | null | null | python/dazl/model/__init__.py | DACH-NY/dazl-client | 56c8b1be047415b2bcb35b6558de4a780a402458 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017-2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
:mod:`dazl.model` package
=========================
This module is deprecated. These types have generally moved to :mod:`dazl.client` (for the API
introduced in dazl v5) or :mod:`dazl.protocols` (for the API introduced in dazl v8).
.. automodule:: dazl.model.core
.. automodule:: dazl.model.ledger
.. automodule:: dazl.model.lookup
.. automodule:: dazl.model.network
.. automodule:: dazl.model.reading
.. automodule:: dazl.model.types
.. automodule:: dazl.model.types_store
.. automodule:: dazl.model.writing
"""
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
from . import core, ledger, lookup, network, reading, types, writing
__all__ = ["core", "ledger", "lookup", "network", "reading", "writing"]
warnings.warn(
"dazl.model is deprecated; these types have moved to either dazl.ledger or dazl.client.",
DeprecationWarning,
stacklevel=2,
)
| 30.514286 | 102 | 0.712547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 803 | 0.751873 |
638cb4fc9dbdd295d6a3f3e2773674502d0d5ab5 | 1,559 | py | Python | paxes_cinder/scheduler/filters/storage_protocol_filter.py | windskyer/k_cinder | 000ee539ee4842a158071d26ee99d12c7c0a87da | [
"Apache-2.0"
] | null | null | null | paxes_cinder/scheduler/filters/storage_protocol_filter.py | windskyer/k_cinder | 000ee539ee4842a158071d26ee99d12c7c0a87da | [
"Apache-2.0"
] | null | null | null | paxes_cinder/scheduler/filters/storage_protocol_filter.py | windskyer/k_cinder | 000ee539ee4842a158071d26ee99d12c7c0a87da | [
"Apache-2.0"
] | null | null | null | from cinder.openstack.common import log as logging
from cinder.openstack.common.scheduler import filters
from cinder.openstack.common.scheduler.filters import extra_specs_ops
LOG = logging.getLogger(__name__)
class StorageProtocolFilter(filters.BaseHostFilter):
    """StorageProtocol filters based on volume host's storage protocol.

    Passes a host when the request metadata has no 'storage_protocol'
    requirement, or when the host's advertised 'storage_protocol'
    capability matches the requirement (extra_specs_ops operators
    supported in the requirement string).
    """

    def _satisfies_metadata(self, host_stats, metadata):
        """Return True when ``host_stats`` satisfies the protocol requirement.

        ``host_stats`` may be a dict-like object or an object exposing a
        ``capabilities`` dict; both layouts are probed in turn.
        """
        req = metadata.get('storage_protocol', None)
        if req is None:
            # No requirement -> every host satisfies it.
            return True
        try:
            cap = host_stats.get('storage_protocol', None)
        except AttributeError:
            # Not dict-like: fall back to the capabilities attribute.
            try:
                cap = host_stats.capabilities.get('storage_protocol', None)
            except AttributeError:
                return False
        if cap is None:
            return False
        if not extra_specs_ops.match(cap, req):
            LOG.debug(_("storage protocol requirement '%(req)s' does not match "
                        "'%(cap)s'"), {'req': req, 'cap': cap})
            return False
        return True

    def host_passes(self, host_state, filter_properties):
        """Return True if host has storage protocol eq metadata storage protocol."""
        metadata = filter_properties.get('metadata', None)
        if metadata is None:
            return True
        if not self._satisfies_metadata(host_state, metadata):
            # Fixed typo in the log message ("strorage" -> "storage").
            LOG.debug(_("%(host_state)s fails metadata storage_protocol "
                        "requirements"), {'host_state': host_state})
            return False
        return True
| 38.02439 | 84 | 0.62925 | 1,346 | 0.863374 | 0 | 0 | 0 | 0 | 0 | 0 | 364 | 0.233483 |
6390b3aacb002c83ef8bc4d3f72ff626a4abe7f1 | 1,771 | py | Python | Bioinformatics IV/Week IV/PeptideSequencingProblem.py | egeulgen/Bioinformatics_Specialization | 38581b471a54c41d780d9eeb26a7033eb57f3a01 | [
"MIT"
] | 3 | 2021-04-03T23:46:42.000Z | 2021-08-08T01:19:32.000Z | Bioinformatics IV/Week IV/PeptideSequencingProblem.py | egeulgen/Bioinformatics_Specialization | 38581b471a54c41d780d9eeb26a7033eb57f3a01 | [
"MIT"
] | null | null | null | Bioinformatics IV/Week IV/PeptideSequencingProblem.py | egeulgen/Bioinformatics_Specialization | 38581b471a54c41d780d9eeb26a7033eb57f3a01 | [
"MIT"
] | null | null | null | import sys
from copy import deepcopy
# Build the amino-acid mass table at import time from a file of
# "<letter> <integer mass>" lines, keyed by mass -> amino-acid letter.
# NOTE(review): the file handle is never closed, and the relative path
# assumes the script runs from the directory containing the table.
mass_file=open('integer_mass_table.txt')
mass_table = {}
for line in mass_file:
    aa, mass = line.rstrip().split(' ')
    mass_table[int(mass)] = aa
# Optional toy masses used by some course exercises:
# mass_table[4] = 'X'
# mass_table[5] = 'Z'
def PeptideSequencing(spectral_vector):
    """Find a highest-scoring peptide explaining ``spectral_vector``.

    Dynamic programming over a DAG whose nodes are positions 0..len(vector)
    and whose edges connect positions whose difference is a valid amino-acid
    mass (per the module-level ``mass_table``). The score of a path is the
    sum of the spectral values at the visited positions.

    :param spectral_vector: list of integer scores, one per mass unit.
    :return: peptide string spelled by the best path.
    """
    # Position 0 acts as the source node with score 0.
    spectral_vector = [0] + spectral_vector
    # (Removed a dead ``adj_list`` pre-pass that built the same edge set
    # but was never read.)
    # adj_dict: node j -> list of [parent i, amino-acid letter for mass j-i]
    adj_dict = {}
    for i in range(len(spectral_vector)):
        for j in range(i, len(spectral_vector)):
            if (j - i) in mass_table:
                tmp = [i, mass_table[j - i]]
                if not j in adj_dict.keys():
                    adj_dict[j] = [tmp]
                else:
                    adj_dict[j].append(tmp)
    # scores: node -> [best score so far, best incoming edge [parent, letter]]
    scores = {0: [0, '-']}
    for node in adj_dict.keys():
        scores[node] = [-1e6, '-']
        tmp = adj_dict[node]
        for x in tmp:
            if x[0] != 0:
                scores[x[0]] = [-1e6, '-']
    # Relax nodes in adj_dict insertion order (ascending j for this graph).
    for node in adj_dict.keys():
        max_score = -1e6
        bold_edge = '-'
        for parent in adj_dict[node]:
            score = scores[parent[0]][0]
            if score > max_score:
                max_score = score
                bold_edge = parent
        scores[node] = [max_score + spectral_vector[node], bold_edge]
    # Backtrack from the sink.
    # NOTE(review): this assumes the sink (last position) is the last key
    # inserted into ``scores`` — confirm for graphs where intermediate
    # parents are registered after the sink.
    node = list(scores.keys())[-1]
    peptide = ''
    while node != 0:
        peptide = scores[node][1][1] + peptide
        node = scores[node][1][0]
    return peptide
if __name__ == "__main__":
    # Read one space-separated spectral vector from stdin and print the
    # best-scoring peptide.
    spectral_vector = [int(x) for x in sys.stdin.read().rstrip().split(' ')]
    # print(spectral_vector)
    print(PeptideSequencing(spectral_vector))
| 28.564516 | 76 | 0.539243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.067758 |
6390c12244c07615289c789c791dd14d31ca59c7 | 2,465 | py | Python | test/likelihoods/test_multitask_gaussian_likelihood.py | llguo95/gpytorch | 1fa69935104565c377ce95d2c581c9eedfb55817 | [
"MIT"
] | 2,673 | 2018-02-19T22:28:58.000Z | 2022-03-31T13:22:28.000Z | test/likelihoods/test_multitask_gaussian_likelihood.py | llguo95/gpytorch | 1fa69935104565c377ce95d2c581c9eedfb55817 | [
"MIT"
] | 1,415 | 2018-02-19T20:38:20.000Z | 2022-03-30T12:53:13.000Z | test/likelihoods/test_multitask_gaussian_likelihood.py | llguo95/gpytorch | 1fa69935104565c377ce95d2c581c9eedfb55817 | [
"MIT"
] | 467 | 2018-03-07T02:06:05.000Z | 2022-03-27T07:05:44.000Z | #!/usr/bin/env python3
import unittest
import torch
from gpytorch.distributions import MultitaskMultivariateNormal
from gpytorch.lazy import KroneckerProductLazyTensor, RootLazyTensor
from gpytorch.likelihoods import MultitaskGaussianLikelihood
from gpytorch.test.base_likelihood_test_case import BaseLikelihoodTestCase
class TestMultitaskGaussianLikelihood(BaseLikelihoodTestCase, unittest.TestCase):
seed = 2
def _create_conditional_input(self, batch_shape=torch.Size([])):
return torch.randn(*batch_shape, 5, 4)
def _create_marginal_input(self, batch_shape=torch.Size([])):
mat = torch.randn(*batch_shape, 5, 5)
mat2 = torch.randn(*batch_shape, 4, 4)
covar = KroneckerProductLazyTensor(RootLazyTensor(mat), RootLazyTensor(mat2))
return MultitaskMultivariateNormal(torch.randn(*batch_shape, 5, 4), covar)
def _create_targets(self, batch_shape=torch.Size([])):
return torch.randn(*batch_shape, 5, 4)
def create_likelihood(self):
return MultitaskGaussianLikelihood(num_tasks=4, rank=2)
def test_setters(self):
likelihood = MultitaskGaussianLikelihood(num_tasks=3, rank=0)
a = torch.randn(3, 2)
mat = a.matmul(a.transpose(-1, -2))
# test rank 0 setters
likelihood.noise = 0.5
self.assertAlmostEqual(0.5, likelihood.noise.item())
likelihood.task_noises = torch.tensor([0.04, 0.04, 0.04])
for i in range(3):
self.assertAlmostEqual(0.04, likelihood.task_noises[i].item())
with self.assertRaises(AttributeError) as context:
likelihood.task_noise_covar = mat
self.assertTrue("task noises" in str(context.exception))
# test low rank setters
likelihood = MultitaskGaussianLikelihood(num_tasks=3, rank=2)
likelihood.noise = 0.5
self.assertAlmostEqual(0.5, likelihood.noise.item())
likelihood.task_noise_covar = mat
self.assertAllClose(mat, likelihood.task_noise_covar)
with self.assertRaises(AttributeError) as context:
likelihood.task_noises = torch.tensor([0.04, 0.04, 0.04])
self.assertTrue("task noises" in str(context.exception))
class TestMultitaskGaussianLikelihoodBatch(TestMultitaskGaussianLikelihood):
seed = 0
def create_likelihood(self):
return MultitaskGaussianLikelihood(num_tasks=4, rank=2, batch_shape=torch.Size([3]))
def test_nonbatch(self):
pass
| 35.214286 | 92 | 0.708722 | 2,136 | 0.866531 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.037323 |
639138968935973bda9f7100f85f9fc9166454f1 | 399 | py | Python | zip/unzip_print.py | juarezhenriquelisboa/Python | 5c5498b33e7cba4e3bfa322a6a76bed74b68e6bf | [
"MIT"
] | 1 | 2021-01-01T14:46:28.000Z | 2021-01-01T14:46:28.000Z | zip/unzip_print.py | juarezhenriquelisboa/Python | 5c5498b33e7cba4e3bfa322a6a76bed74b68e6bf | [
"MIT"
] | null | null | null | zip/unzip_print.py | juarezhenriquelisboa/Python | 5c5498b33e7cba4e3bfa322a6a76bed74b68e6bf | [
"MIT"
] | null | null | null | import zipfile
import sys
# NOTE(review): Python 2 syntax (print statements) — this file will not run
# under Python 3 as-is. Tries each command-line argument as a password for
# a fixed archive name, extracting and dumping every member to stdout.
for arg in sys.argv[1:]:
    senha = str(arg)  # candidate password ("senha" is Portuguese for password)
    z = zipfile.ZipFile("protegido.zip")
    files = z.namelist()
    z.setpassword(senha)
    z.extractall()
    z.close()
    # Print each extracted file's name followed by its full contents.
    for extracted_file in files:
        print "Nome do arquivo: "+extracted_file+"\n\nConteudo: "
        with open(extracted_file) as f:
            content = f.readlines()
        print ''.join(content)
        print '\n\n'
6392f325bfe2c22484f1ffe055193199e29b8c30 | 948 | py | Python | awards/forms.py | JKimani77/awards | 8cdfaadbd4aca5ef2031966496ebcb5c3c3ea49e | [
"MIT"
] | null | null | null | awards/forms.py | JKimani77/awards | 8cdfaadbd4aca5ef2031966496ebcb5c3c3ea49e | [
"MIT"
] | null | null | null | awards/forms.py | JKimani77/awards | 8cdfaadbd4aca5ef2031966496ebcb5c3c3ea49e | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.forms import UserCreationForm,AuthenticationForm
from django.contrib.auth.models import User
from .models import Profile,Project,Review
class RegForm(UserCreationForm):
    """User registration form: Django's UserCreationForm plus an email field."""

    email = forms.EmailField()

    class Meta:
        model = User
        fields = ('username','email', 'password1','password2')
class LoginForm(AuthenticationForm):
    """Login form based on Django's AuthenticationForm with explicit labels."""

    username = forms.CharField(label='Username', max_length=254)
    password = forms.CharField(label='Password',widget=forms.PasswordInput)
class ProfileForm(forms.ModelForm):
    """ModelForm for editing a user's Profile (picture and bio)."""

    class Meta:
        model = Profile
        fields = ('profile_pic','bio')
class ProjectForm(forms.ModelForm):
    """ModelForm for submitting a Project (title, description, image, link)."""

    class Meta:
        model = Project
        fields = ('title','description','project_pic','project_link')
class RatingForm(forms.ModelForm):
    """ModelForm for rating a project on design, usability and content."""

    class Meta:
        model = Review
        fields =('design','usability','content')
63941acd5ea7c627eccf6daf6a03aa134aea7caf | 1,269 | py | Python | setup.py | aaren/pharminv | b3d3d11c81bafa40a72583aa98f51b05acec9835 | [
"BSD-3-Clause"
] | 12 | 2015-05-12T21:02:44.000Z | 2021-04-24T23:05:16.000Z | setup.py | aaren/pharminv | b3d3d11c81bafa40a72583aa98f51b05acec9835 | [
"BSD-3-Clause"
] | 5 | 2015-04-05T17:05:09.000Z | 2022-02-11T16:46:57.000Z | setup.py | aaren/pharminv | b3d3d11c81bafa40a72583aa98f51b05acec9835 | [
"BSD-3-Clause"
] | 3 | 2018-04-27T20:10:10.000Z | 2022-02-16T13:12:02.000Z | import subprocess
from setuptools import setup, Extension
# (``subprocess`` is imported at the top of this file, above this block.)

# Convert the Markdown README to reST for PyPI via pandoc; fall back to the
# raw Markdown text when pandoc is not installed (OSError from Popen).
try:
    pandoc = subprocess.Popen(['pandoc', 'README.md', '--to', 'rst'],
                              stdout=subprocess.PIPE)
    readme = pandoc.communicate()[0].decode()
except OSError:
    with open('README.md') as f:
        readme = f.read()

cmdclass = {}

# Build from the .pyx source when Cython is available; otherwise ship the
# pre-generated C file.
try:
    from Cython.Build import cythonize
    USE_CYTHON = True
except ImportError:
    USE_CYTHON = False

ext = '.pyx' if USE_CYTHON else '.c'

# numpy headers are required to compile the extension module.
try:
    import numpy
except ImportError:
    exit('Install numpy before installing pharminv.')

extensions = [Extension('harminv.charminv',
                        ["harminv/charminv" + ext],
                        include_dirs=[numpy.get_include()],
                        libraries=['harminv'],
                        )]

if USE_CYTHON:
    extensions = cythonize(extensions)
    from Cython.Distutils import build_ext
    cmdclass.update(build_ext=build_ext)

setup(
    name='pharminv',
    version="0.4",
    description='Python interface to harminv',
    long_description=readme,
    packages=['harminv'],
    author="Aaron O'Leary",
    author_email='dev@aaren.me',
    license='GPLv3',
    url='http://github.com/aaren/pharminv',
    cmdclass=cmdclass,
    ext_modules=extensions
)
| 23.943396 | 69 | 0.618597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 262 | 0.206462 |
63945460f91035ee579fbea1f037bd4fecb49b1e | 8,830 | py | Python | RiskQuantLib/Tool/databaseTool.py | SyuyaMurakami/RiskQuantLib-Doc | 2503befc24c2e422e51f8b9f468c8d8439e11c65 | [
"MIT"
] | 1 | 2021-12-29T12:18:45.000Z | 2021-12-29T12:18:45.000Z | RiskQuantLib/Tool/databaseTool.py | SyuyaMurakami/RiskQuantLib-Doc | 2503befc24c2e422e51f8b9f468c8d8439e11c65 | [
"MIT"
] | null | null | null | RiskQuantLib/Tool/databaseTool.py | SyuyaMurakami/RiskQuantLib-Doc | 2503befc24c2e422e51f8b9f468c8d8439e11c65 | [
"MIT"
] | 1 | 2021-12-08T02:14:34.000Z | 2021-12-08T02:14:34.000Z | #!/usr/bin/python
#coding = utf-8
import numpy as np
import pandas as pd
import mysql.connector
class mysqlTool():
    """
    This is the API to connect with mysql database.

    One shared buffered cursor serves every query, so instances are NOT
    thread-safe.

    NOTE(review): table and column names are concatenated directly into
    SQL strings throughout this class — never pass untrusted input for
    those arguments (SQL-injection risk). Row VALUES, by contrast, are
    passed as %s parameters and are handled safely by the driver.
    """
    def __init__(self,databaseNameString:str,hostAddress:str,userName:str,passWord:str):
        # Open the connection eagerly; a buffered cursor lets a new query
        # run even if a previous result set was not fully consumed.
        self.targetDB = mysql.connector.connect(
            host = hostAddress,
            user = userName,
            passwd = passWord,
            database = databaseNameString
            # buffered = True
        )
        self.targetCursor = self.targetDB.cursor(buffered=True)
    def getAllTables(self):
        # Each element is a 1-tuple (table_name,) as returned by SHOW TABLES.
        self.targetCursor.execute("SHOW TABLES")
        return [i for i in self.targetCursor]
    def getColNameOfTable(self,tableNameString:str):
        # Runs SELECT * only to populate cursor.column_names; the buffered
        # rows themselves are never fetched.
        sql = "SELECT * FROM "+tableNameString
        self.targetCursor.execute(sql)
        return [i for i in self.targetCursor.column_names]
    def selectAllFromTable(self,tableNameString:str):
        # Fetch the whole table into a DataFrame with the DB column names.
        sql = "SELECT * FROM "+tableNameString
        self.targetCursor.execute(sql)
        result = self.targetCursor.fetchall()
        df = pd.DataFrame(result,columns = self.targetCursor.column_names)
        return df
    def selectDictFromTable(self,tableNameString:str,colNameAsKey:str,colNameAsValue:str):
        # Build {key_column_value: value_column_value}; later duplicate keys
        # overwrite earlier ones.
        # NOTE(review): the broad except prints and returns {} — failures
        # are indistinguishable from an empty table for the caller.
        try:
            sql = "SELECT "+colNameAsKey+","+colNameAsValue+" FROM "+tableNameString
            self.targetCursor.execute(sql)
            result = self.targetCursor.fetchall()
            resultDict = dict(zip([i[0] for i in result],[i[1] for i in result]))
            return resultDict
        except Exception as e:
            print(e)
            return {}
    def selectColFromTable(self,tableNameString:str,colNameList:list):
        # Back-tick each column name and select only those columns.
        colNameString = "".join(["`"+i+"`," for i in colNameList]).strip(",")
        sql = "SELECT "+colNameString+" FROM "+tableNameString
        self.targetCursor.execute(sql)
        result = self.targetCursor.fetchall()
        df = pd.DataFrame(result,columns = self.targetCursor.column_names)
        return df
    def selectColFromTableWithCondition(self,tableNameString:str,colNameList:list,conditionString:str):
        # Same as selectColFromTable, with a raw WHERE clause appended.
        colNameString = "".join(["`"+i+"`," for i in colNameList]).strip(",")
        sql = "SELECT "+colNameString+" FROM "+tableNameString+" WHERE "+conditionString
        self.targetCursor.execute(sql)
        result = self.targetCursor.fetchall()
        df = pd.DataFrame(result,columns = self.targetCursor.column_names)
        return df
    def selectAllFromTableWithCondition(self,tableNameString:str,conditionString:str):
        # Full-row SELECT with a raw WHERE clause appended.
        sql = "SELECT * FROM "+tableNameString+" WHERE "+conditionString
        self.targetCursor.execute(sql)
        result = self.targetCursor.fetchall()
        df = pd.DataFrame(result,columns = self.targetCursor.column_names)
        return df
    def insertRowIntoTable(self,tableNameString:str,valuesTuple:tuple):
        # Probe the table once to learn its column names, then INSERT a
        # single parameterized row covering every column, and commit.
        sql = "SELECT * FROM "+tableNameString
        self.targetCursor.execute(sql)
        colNameString = "".join(["`"+i+"`," for i in self.targetCursor.column_names]).strip(", ")
        sql = "INSERT INTO "+tableNameString+" ("+colNameString+") VALUES (" + "".join(["%s, " for i in range(len(self.targetCursor.column_names))]).strip(", ")+")"
        val = valuesTuple
        self.targetCursor.execute(sql,val)
        self.targetDB.commit()
        print("Insert Finished")
    def replaceRowsIntoTable(self,tableNameString:str,valuesTupleList:list):
        # Bulk REPLACE (insert-or-overwrite by primary/unique key) of many
        # full rows, committed once at the end.
        sql = "SELECT * FROM "+tableNameString
        self.targetCursor.execute(sql)
        colNameString = "".join(["`"+i+"`," for i in self.targetCursor.column_names]).strip(", ")
        sql = "REPLACE INTO "+tableNameString+" ("+colNameString+") VALUES (" + "".join(["%s, " for i in range(len(self.targetCursor.column_names))]).strip(", ")+")"
        val = valuesTupleList
        self.targetCursor.executemany(sql, val)
        self.targetDB.commit()
        print("Insert Finished")
    def replaceDFIntoTable(self,tableNameString:str,dataFrame:pd.DataFrame):
        # Align the DataFrame to the table's column order, convert NaN cells
        # to SQL NULL, then bulk-REPLACE all rows.
        try:
            import numpy as np
            DBTableColNameList = self.getColNameOfTable(tableNameString)
            df = dataFrame[DBTableColNameList]
            # convert to tuple (NaN floats become None so they store as NULL)
            valuesTapleList = df.apply(lambda x: tuple([None if type(i)==type(np.nan) and np.isnan(i) else i for i in x]),axis=1).to_list()
            sql = "SELECT * FROM "+tableNameString
            self.targetCursor.execute(sql)
            colNameString = "".join(["`"+i+"`," for i in self.targetCursor.column_names]).strip(", ")
            sql = "REPLACE INTO "+tableNameString+" ("+colNameString+") VALUES (" + "".join(["%s, " for i in range(len(self.targetCursor.column_names))]).strip(", ")+")"
            val = valuesTapleList
            self.targetCursor.executemany(sql, val)
            self.targetDB.commit()
            print("Replace Finished")
        except Exception as e:
            # Best-effort: report and swallow any failure.
            print("Replace Failed, Error:",e)
class oracleTool():
    """Thin wrapper around an Oracle database accessed through SQLAlchemy."""

    def __init__(self, databaseNameString: str, hostAddress: str, port: int, userName: str, passWord: str):
        """Assemble the cx_Oracle connection URI and create the engine."""
        # Deferred import keeps SQLAlchemy optional until a connection is made.
        from sqlalchemy import create_engine
        uri = (
            "oracle+cx_oracle://" + userName + ":" + passWord
            + "@" + hostAddress + ":" + str(port) + "/" + databaseNameString
        )
        self.engine = create_engine(uri)

    def readSql(self, sql: str):
        """Run a SQL query and return the result set as a DataFrame."""
        return pd.read_sql(sql, con=self.engine)
class neo4jTool():
    """
    This is the API to connect with neo4j database.

    NOTE(review): convertDataType references a module-level ``np`` (numpy)
    and the annotations reference ``pd`` (pandas); both must be imported at
    the top of this file — confirm.
    """

    def __init__(self, hostAddress: str, port: int, userName: str, password: str):
        # Deferred import keeps py2neo optional until a connection is made.
        from py2neo import Graph
        self.engine = Graph(hostAddress + ":" + str(port), auth=(userName, password))

    def readCypher(self, cypher: str):
        """Run a raw Cypher statement and return the py2neo result cursor."""
        data = self.engine.run(cypher)
        return data

    def convertDataType(self, x):
        """Coerce a value (recursively for lists) into a storable primitive:
        numpy floats -> float, strftime-capable objects -> 'YYYY-MM-DD'."""
        if isinstance(x, np.float64):
            return float(x)
        elif hasattr(x, 'strftime'):
            return x.strftime("%Y-%m-%d")
        elif isinstance(x, list):
            return [self.convertDataType(i) for i in x]
        else:
            return x

    def updateDFToNode(self, nodeList: list, df: pd.DataFrame, colAsName: str):
        """Copy DataFrame columns onto the nodes whose 'name' matches a value
        of df[colAsName]; nodes are mutated in place (caller pushes them)."""
        nameWaitedToBeUpdated = df[colAsName].to_list()
        matchedNodes = [node for node in nodeList if node['name'] in nameWaitedToBeUpdated]
        tmp = df.set_index(colAsName, drop=True)
        # Was nested list comprehensions used purely for side effects.
        # NOTE(review): assumes names are unique in the DataFrame — confirm.
        for node in matchedNodes:
            for col in tmp.columns:
                if col != colAsName:
                    node.update({col: self.convertDataType(tmp.loc[node['name']][col])})

    def convertDFToNode(self, nodeType: str, df: pd.DataFrame, colAsName: str):
        """Build one Node per DataFrame row; colAsName becomes 'name' and
        every other column becomes a node property."""
        from py2neo import Node
        nodeList = [Node(nodeType, name=df.iloc[i][colAsName]) for i in range(df.shape[0])]
        for i in range(df.shape[0]):
            for col in df.columns:
                if col != colAsName:
                    nodeList[i].update({col: self.convertDataType(df.iloc[i][col])})
        return nodeList

    def addNodeFromDF(self, nodeType: str, df: pd.DataFrame, colAsName: str):
        """Create one node per DataFrame row and return the node objects."""
        nodeList = self.convertDFToNode(nodeType, df, colAsName)
        for node in nodeList:
            self.engine.create(node)
        return nodeList

    def selectAllLabel(self):
        """Return every distinct node label present in the database."""
        labelList = self.readCypher("MATCH (res) RETURN distinct labels(res)")
        return [i[0][0] for i in labelList]

    def selectAllNode(self, nodeType: str):
        """Return every node carrying the given label."""
        nodeList = self.readCypher(f'''MATCH (res:`{nodeType}`) RETURN res''')
        return [i['res'] for i in nodeList]

    def selectAttrFromNode(self, nodeType: str, attrList: list):
        """Return the requested attributes of every `nodeType` node as a
        DataFrame whose columns carry the plain attribute names."""
        if isinstance(attrList, str):  # was: type(attrList)==type('')
            attrList = [attrList]
        attr = "'],res['".join(attrList)
        nodeList = self.readCypher(f"MATCH (res:`{nodeType}`) RETURN res['" + attr + "']")
        return nodeList.to_data_frame().rename(columns=dict(zip(["res['" + i + "']" for i in attrList], attrList)))

    def selectAllNodeWithCondition(self, nodeType: str, conditionString: str, resultVariableName: str = 'res'):
        """Return every `nodeType` node matching a raw Cypher WHERE clause."""
        nodeList = self.readCypher(f'''MATCH ({resultVariableName}:`{nodeType}`) WHERE {conditionString} RETURN {resultVariableName}''')
        return [i[resultVariableName] for i in nodeList]

    def selectAttrFromNodeWithCondition(self, nodeType: str, attrList: list, conditionString: str, resultVariableName: str = 'res'):
        """Like selectAttrFromNode but filtered by a raw Cypher WHERE clause.

        Bug fix: the separator used to hard-code ``res`` (``"'],res['"``), so
        any resultVariableName other than 'res' produced a broken RETURN list
        for multi-attribute queries; it now uses resultVariableName throughout.
        """
        if isinstance(attrList, str):  # was: type(attrList)==type('')
            attrList = [attrList]
        attr = ("'']," + resultVariableName + "[''").replace("''", "'").join(attrList)
        nodeList = self.readCypher(f"MATCH ({resultVariableName}:`{nodeType}`) WHERE {conditionString} RETURN {resultVariableName}['" + attr + "']")
        return nodeList.to_data_frame().rename(columns=dict(zip([f"{resultVariableName}['" + i + "']" for i in attrList], attrList)))

    def connectNodeByAttr(self, nodeTypeLeft: str, nodeTypeRight: str, attrNameLeft: str, attrNameRight: str, relationName: str):
        """Create relationName relationships between every left/right node
        pair whose join attributes are equal (O(n*m) in-memory nested scan)."""
        from py2neo import Relationship
        leftNode = self.selectAllNode(nodeTypeLeft)
        rightNode = self.selectAllNode(nodeTypeRight)
        pair = [(left, right) for left in leftNode for right in rightNode if left[attrNameLeft] == right[attrNameRight]]
        for left, right in pair:
            self.engine.create(Relationship(left, relationName, right))

    def replaceNode(self, nodeObj):
        """Push local changes on a node object back to the database."""
        self.engine.push(nodeObj)

    def replaceNodeFromDF(self, nodeType: str, df: pd.DataFrame, colAsName: str):
        """Upsert DataFrame rows: update nodes whose name already exists,
        create the remaining rows as new nodes.

        NOTE(review): names are interpolated into the Cypher IN (...) list, so
        a name containing a quote breaks the query; values must be trusted.
        """
        nodeList = self.selectAllNodeWithCondition(nodeType, "res.name IN ['" + "','".join(df[colAsName].to_list()) + "']")
        self.updateDFToNode(nodeList, df, colAsName)
        oldNode = [i['name'] for i in nodeList]
        newRows = df[[(i not in oldNode) for i in df[colAsName]]]
        self.addNodeFromDF(nodeType, newRows, colAsName)
        for node in nodeList:
            self.engine.push(node)

    def deleteAllNode(self):
        """Delete every node (and its relationships) in the database."""
        self.engine.delete_all()
        print("All Nodes Have Been Deleted")

    def deleteNode(self, nodeObj):
        """Delete a single node object from the database."""
        self.engine.delete(nodeObj)
63947c1901639950a3d0a9a8e03f0f05a930b237 | 270 | py | Python | subset_train.py | sngweicong/DeepCTR-Torch | 67d4e9d0c8a13aa4d614b2d04397a7f6e7a0e9af | [
"Apache-2.0"
] | null | null | null | subset_train.py | sngweicong/DeepCTR-Torch | 67d4e9d0c8a13aa4d614b2d04397a7f6e7a0e9af | [
"Apache-2.0"
] | null | null | null | subset_train.py | sngweicong/DeepCTR-Torch | 67d4e9d0c8a13aa4d614b2d04397a7f6e7a0e9af | [
"Apache-2.0"
] | null | null | null | data_size_plus_header = 1000001
# Copy the first `data_size_plus_header` lines of the raw training file into
# a smaller sample file. Context managers guarantee both handles are closed
# even if a read or write fails (the original leaked the handles on error).
train_dir = 'train'
subset_train_dir = 'sub_train.txt'
with open(train_dir, 'r') as fullfile, open(subset_train_dir, 'w') as subfile:
    for i in range(data_size_plus_header):
        subfile.write(fullfile.readline())
| 20.769231 | 38 | 0.759259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.103704 |
6396512d9916e7db0610bee423d4d3ccdc2c336e | 12,095 | py | Python | packages/validate_and_forward/lambda_handler.py | NHSDigital/list-reconciliation | 37b1ebe99a64275e23b0e7fb6a89415b92d14306 | [
"MIT"
] | 4 | 2021-06-25T08:28:54.000Z | 2021-12-16T11:03:42.000Z | packages/validate_and_forward/lambda_handler.py | NHSDigital/list-reconciliation | 37b1ebe99a64275e23b0e7fb6a89415b92d14306 | [
"MIT"
] | 184 | 2021-06-24T15:27:08.000Z | 2022-03-17T12:44:28.000Z | packages/validate_and_forward/lambda_handler.py | NHSDigital/list-reconciliation | 37b1ebe99a64275e23b0e7fb6a89415b92d14306 | [
"MIT"
] | 3 | 2021-11-05T10:21:44.000Z | 2022-03-04T14:29:24.000Z | import json
import os
import traceback
from datetime import datetime
from uuid import uuid4
import boto3
from aws.ssm import get_ssm_params
from database import Jobs
from gp_file_parser.parser import parse_gp_extract_file_s3
from jobs.statuses import InputFolderType, InvalidErrorType, JobStatus, ParseStatus
from lr_logging import error, get_cloudlogbase_config, success
from lr_logging.exceptions import InvalidFilename, InvalidGPExtract, InvalidStructure
from mesh import AWSMESHMailbox, get_mesh_mailboxes
from pynamodb.exceptions import PutError, PynamoDBConnectionError, QueryError
from spine_aws_common.lambda_application import LambdaApplication
class ValidateAndForward(LambdaApplication):
    """
    Lambda:
        Responsible for creating a Job, tracking it DynamoDB and then sending a given input file off to DPS.
        If the input file is invalid, then reject
    Trigger:
        extracts_inbound inputs bucket
    """

    def __init__(self):
        # Configure structured logging before anything else can fail.
        super().__init__(additional_log_config=get_cloudlogbase_config())
        self.job_id = None  # assigned per-invocation in start()
        self.s3 = boto3.client("s3")
        # MESH mailbox configuration is read from SSM parameter store.
        self.mesh_params = get_ssm_params(
            self.system_config["MESH_SSM_PREFIX"], self.system_config["AWS_REGION"]
        )

    def initialise(self):
        # No per-invocation setup needed; presumably a LambdaApplication hook.
        pass
    def start(self):
        """Main entry point: validate the triggering S3 file and forward it to DPS.

        Flow: read the S3 event -> create a PENDING job -> download and parse
        the GP extract -> on success send via MESH and mark SENT_TO_DPS; on
        validation failure mark REJECTED and archive the file plus a log.
        """
        # A malformed trigger event is fatal: log it and re-raise.
        try:
            self.job_id = str(uuid4())
            self.input_bucket = self.event["Records"][0]["s3"]["bucket"]["name"]
            self.input_file = self.event["Records"][0]["s3"]["object"]["key"]
        except KeyError as e:
            self.response = error(
                "validate_and_forward Lambda tried to access missing key",
                self.log_object.internal_id,
                error=traceback.format_exc(),
            )
            raise e
        self.create_initial_job()
        self.filename, file = self.get_file_contents()
        # get gp_file extract stats for job's table
        # Also handles rejection if file can't be parsed and validations fails
        try:
            practice_code, num_of_records = self.parse_gp_extract(
                self.input_bucket, self.input_file
            )
            self.log_object.write_log(
                "LRSDI02",
                log_row_dict={
                    "input_file": self.input_file,
                    "job_id": self.job_id,
                },
            )
        except (InvalidStructure, InvalidGPExtract, InvalidFilename) as exc:
            # Validation failure is an expected outcome: mark the job rejected,
            # archive the bad file + JSON error log, and return success (the
            # Lambda itself completed its work).
            self.update_job_status(
                JobStatus.REJECTED.value, ParseStatus.PARSE_FAILED.value
            )
            message = json.dumps(self.process_invalid_message(exc))
            self.handle_rejection(InputFolderType.REJECTED, message)
            self.log_object.write_log(
                "LRSDW01",
                log_row_dict={
                    "input_file": self.input_file,
                    "job_id": self.job_id,
                    "reason": exc,
                },
            )
            self.response = success(
                message="Lambda application stopped gp extract input file rejected, rejected file handled",
                internal_id=self.log_object.internal_id,
                job_id=self.job_id,
                file=self.input_file,
                reason=exc,
            )
            return self.response
        # Happy path: record extract stats, forward to DPS, mark the job sent.
        self.update_job_info(practice_code, num_of_records)
        self.send_mesh_file(self.filename, file)
        self.update_job_status(JobStatus.SENT_TO_DPS.value)
        self.response = success(
            message="validate_and_forward file sent",
            internal_id=self.log_object.internal_id,
            # NOTE(review): this success uses `job=` while the rejection path
            # uses `job_id=` — confirm which key downstream consumers expect.
            job=self.job_id,
            file=self.input_file,
        )
    def create_initial_job(self):
        """Creates an initial Job item in DynamoDb.

        The job starts in PENDING status with PracticeCode set to the
        NOT_PARSED sentinel; real values are filled in by update_job_info()
        once the extract has been parsed.

        Raises:
            PynamoDBConnectionError, PutError: re-raised after logging,
                aborting the Lambda run.
        """
        try:
            job_item = Jobs(
                self.job_id,
                PracticeCode=ParseStatus.NOT_PARSED.value,
                FileName=self.input_file,
                Timestamp=datetime.now(),
                StatusId=JobStatus.PENDING.value,
            )
            job_item.save()
            self.log_object.write_log(
                "LRSDI01",
                log_row_dict={
                    "job_id": self.job_id,
                    "input_file": self.input_file,
                },
            )
        except (PynamoDBConnectionError, PutError) as e:
            # Log + build an error response before re-raising so the failure
            # is visible both in logs and in the Lambda response.
            self.log_object.write_log(
                "LRSDC01",
                log_row_dict={
                    "job_id": self.job_id,
                    "input_file": self.input_file,
                    "error": e,
                },
            )
            self.response = error(
                message="validate_and_forward failed to create a job item before processing the gp_extract file",
                internal_id=self.log_object.internal_id,
                job_id=self.job_id,
                file=self.input_file,
            )
            raise e
    def parse_gp_extract(self, input_bucket: str, input_file: str) -> tuple[str, int]:
        """Handler to process an uploaded S3 object containing a GP flat
        file extract

        Side effect: records the parse time on ``self.upload_date`` (later
        reused by process_invalid_message for the rejection log).

        Returns:
            extract file data: practice_code & num_of_records

        Raises:
            Validation errors from the parser (caught as InvalidStructure /
            InvalidGPExtract / InvalidFilename by start()).
        """
        self.upload_date = datetime.now()
        input_file_dict = parse_gp_extract_file_s3(
            self.s3,
            input_bucket,
            input_file,
            self.upload_date,
        )
        practice_code = input_file_dict["practice_code"]
        num_of_records = len(input_file_dict["records"])
        return practice_code, num_of_records
def get_file_contents(self):
filename = os.path.basename(str(self.input_file))
file = (
self.s3.get_object(
Bucket=self.input_bucket,
Key=self.input_file,
)["Body"]
.read()
.decode("utf-8")
)
return filename, file
    def send_mesh_file(self, filename: str, file):
        """Send the validated extract to DPS via MESH.

        Resolves the (listrec, dps) mailbox pair from the SSM-backed mapping,
        then sends `file` under `filename`, overwriting any previous message.
        """
        listrec_mesh_id, dps_mesh_id = get_mesh_mailboxes(
            json.loads(self.mesh_params["mesh_mappings"]),
            self.mesh_params["listrec_dps_workflow"],
        )
        mesh = AWSMESHMailbox(listrec_mesh_id, self.log_object)
        # overwrite=True: a re-run of the same job replaces the prior message.
        mesh.send_message(dps_mesh_id, filename, file, overwrite=True)
        self.log_object.write_log(
            "LRSDI04",
            log_row_dict={
                "job_id": self.job_id,
                "input_file": self.input_file,
                "dps_mesh_id": dps_mesh_id,
            },
        )
    def update_job_info(self, practice_code: str, num_of_records: int):
        """Creates Job items in DynamoDb.

        Fills in the real practice code and record count on every job item
        matching this job id (replacing the NOT_PARSED placeholder).

        Args:
            practice_code (str): GP practice code of GP extract.
            num_of_records (int): Number of records in GP extract.

        Raises:
            PynamoDBConnectionError, PutError, QueryError: re-raised after
                logging and setting an error response.
        """
        try:
            job = Jobs.IdIndex.query(self.job_id)
            for j in job:
                j.PracticeCode = practice_code
                j.TotalRecords = num_of_records
                j.save()
            self.log_object.write_log(
                "LRSDI03",
                log_row_dict={
                    "upload_filename": self.input_file,
                    "job_id": self.job_id,
                },
            )
        except (PynamoDBConnectionError, PutError, QueryError) as e:
            self.log_object.write_log(
                "LRSDC02",
                log_row_dict={
                    "job_id": self.job_id,
                    "input_file": self.input_file,
                    "error": e,
                },
            )
            self.response = error(
                message="validate_and_forward failed to update job before sending to DPS",
                internal_id=self.log_object.internal_id,
                job_id=self.job_id,
                file=self.input_file,
            )
            raise e
def update_job_status(self, status_code: str, practice_code=None):
"""Updates Job Status in DynamoDB"""
try:
job = Jobs.IdIndex.query(self.job_id)
if practice_code is None:
for j in job:
j.StatusId = status_code
j.save()
else:
for j in job:
j.StatusId = status_code
j.PracticeCode = practice_code
j.save()
self.log_object.write_log(
"LRSDI05",
log_row_dict={
"upload_filename": self.input_file,
"job_id": self.job_id,
"status_code": status_code,
},
)
except (PynamoDBConnectionError, PutError, QueryError) as e:
self.log_object.write_log(
"LRSDC03",
log_row_dict={
"job_id": self.job_id,
"input_file": self.input_file,
"error": e,
},
)
self.response = error(
message="validate_and_forward failed to update job after sending to DPS",
internal_id=self.log_object.internal_id,
job_id=self.job_id,
file=self.input_file,
)
raise e
    def cleanup_files(self, bucket: str, key: str):
        """Cleanup file that have already been processed e.g. sent DPS file from s3 or rejected

        Deletes the object and logs the outcome; any failure is logged and
        re-raised.

        NOTE(review): log references LRSDI03/LRSDC02 are also used by
        update_job_info — possibly a copy-paste; confirm intended codes.
        """
        try:
            self.s3.delete_object(Bucket=bucket, Key=key)
            self.log_object.write_log(
                "LRSDI03",
                log_row_dict={"key": key, "bucket": bucket},
            )
        except Exception as e:
            # Broad catch is deliberate at this boundary: log, then re-raise.
            self.log_object.write_log(
                "LRSDC02",
                log_row_dict={"key": key, "bucket": bucket},
            )
            raise e
    def handle_rejection(self, prefix: InputFolderType, error_message: str = None):
        """Handles a rejected GP extract file,
        stores input file and log in output bucket

        Copies the offending file into the rejection bucket under `prefix`,
        optionally writes a JSON error log next to it, then deletes the
        original object from the inbound bucket.

        Args:
            prefix (str): S3 folder prefix for where to place the handled file
            error_message (str): message to handle.
        """
        rejection_output_bucket = self.system_config["REJECTION_BUCKET"]
        key = f"{prefix.value}{self.filename}"
        self.s3.copy_object(
            Bucket=rejection_output_bucket,
            Key=key,
            CopySource={"Bucket": self.input_bucket, "Key": self.input_file},
        )
        if error_message:
            # Error log lands under <rejected>/logs/ keyed by file + job id.
            log_filename = f"{self.filename}-RejectedFile-{self.job_id}.json"
            log_key = f"{InputFolderType.REJECTED.value}logs/{log_filename}"
            self.s3.put_object(
                Body=error_message, Bucket=rejection_output_bucket, Key=log_key
            )
        self.cleanup_files(self.input_bucket, self.input_file)
def process_invalid_message(self, exception: Exception) -> str:
"""Create a formatted error message string based on raised
exception, used to store log files
Args:
exception (Exception): exception raised
Returns:
dict: dictionary of failed file information
"""
rejection_log = {"file": self.input_file, "upload_date": str(self.upload_date)}
if isinstance(exception, InvalidStructure):
error = {
"error_type": InvalidErrorType.STRUCTURE.value,
"message": [exception.args[0]],
}
elif isinstance(exception, InvalidGPExtract):
msg = exception.args[0]
error = {
"error_type": InvalidErrorType.RECORDS.value,
"total_records": msg["total_records"],
"total_invalid_records": len(msg["invalid_records"]),
"message": msg["invalid_records"],
}
elif isinstance(exception, InvalidFilename):
msg = exception.args[0]["message"]
error = {"error_type": InvalidErrorType.FILENAME.value, "message": msg}
rejection_log.update(error)
return rejection_log
| 33.136986 | 113 | 0.556676 | 11,437 | 0.945597 | 0 | 0 | 0 | 0 | 0 | 0 | 2,665 | 0.220339 |
6396bfd17554b072db271a588e4203eb7b87b54a | 1,258 | py | Python | src/ucar/unidata/apps/noaapsd/default.py | mhiley/IDV | bf45573c008f4b1d155d42fbb078bd99578d69d3 | [
"CNRI-Jython"
] | null | null | null | src/ucar/unidata/apps/noaapsd/default.py | mhiley/IDV | bf45573c008f4b1d155d42fbb078bd99578d69d3 | [
"CNRI-Jython"
] | null | null | null | src/ucar/unidata/apps/noaapsd/default.py | mhiley/IDV | bf45573c008f4b1d155d42fbb078bd99578d69d3 | [
"CNRI-Jython"
] | null | null | null | """
NOAA/ESRL/PSD Jython functions
"""
def calcMonAnom(monthly, ltm, normalize=0):
    """ Calculate the monthly anomaly from a long term mean.
        The number of timesteps in ltm must be 12

        monthly   - time series of monthly grids
        ltm       - 12-step long-term-mean grid
        normalize - if non-zero, also subtract xav(diff) from each anomaly
                    (presumably the spatial average - confirm)

        returns a grid series shaped like monthly
    """
    from visad import VisADException
    monAnom = monthly.clone()
    months = len(ltm)
    # fixed wording (was "must be a 12") and idiomatic comparison
    if months != 12:
        raise VisADException("Number of months in ltm must be 12")
    years = int(len(monthly)/months) +1
    # getStartMonth returns 1-12; shift to a 0-based climatology index
    startMonth = getStartMonth(GridUtil.getTimeSet(monthly))-1
    #print "Start month = " , startMonth
    index = 0
    for year in range(years):
        for month in range(12):
            if index > len(monthly) - 1:
                break
            thisMonth = (startMonth+month)%12
            #print thisMonth
            # anomaly = monthly value minus the matching climatology month
            diff = sub(monthly[index],ltm[thisMonth])
            if normalize != 0:
                diff = sub(diff,xav(diff))
            diff = GridUtil.setParamType(diff, GridUtil.getParamType(monAnom))
            monAnom[index] = diff
            index = index + 1
    return monAnom
def getStartMonth(timeSet):
    """ Get the starting month number (1-12) from a timeset.
    """
    from visad.util import DataUtility as du
    from visad import DateTime
    # First sample of the time set -> its real (time) component
    r = du.getSample(timeSet, 0).getComponent(0)
    dt = DateTime(r)
    # Format only the month field, in the library's configured time zone
    month = dt.formattedString("MM",DateTime.getFormatTimeZone())
    return int(month)
| 29.952381 | 74 | 0.670111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 304 | 0.241653 |
639892302eb62bb4521dec46165a447fd1bb4884 | 370 | py | Python | bitmovin_api_sdk/account/organizations/groups/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 11 | 2019-07-03T10:41:16.000Z | 2022-02-25T21:48:06.000Z | bitmovin_api_sdk/account/organizations/groups/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 8 | 2019-11-23T00:01:25.000Z | 2021-04-29T12:30:31.000Z | bitmovin_api_sdk/account/organizations/groups/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 13 | 2020-01-02T14:58:18.000Z | 2022-03-26T12:10:30.000Z | from bitmovin_api_sdk.account.organizations.groups.groups_api import GroupsApi
from bitmovin_api_sdk.account.organizations.groups.tenants.tenants_api import TenantsApi
from bitmovin_api_sdk.account.organizations.groups.invitations.invitations_api import InvitationsApi
from bitmovin_api_sdk.account.organizations.groups.permissions.permissions_api import PermissionsApi
| 74 | 100 | 0.905405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6398ccc3c667be7698d1809b95c494572f689685 | 1,268 | py | Python | 08.py | brianfl/project-euler | 9f83a3c2da04fd0801a4a575081add665edccd5f | [
"MIT"
] | null | null | null | 08.py | brianfl/project-euler | 9f83a3c2da04fd0801a4a575081add665edccd5f | [
"MIT"
] | null | null | null | 08.py | brianfl/project-euler | 9f83a3c2da04fd0801a4a575081add665edccd5f | [
"MIT"
] | null | null | null | long_number = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
# Project Euler 8: greatest product of `window` adjacent digits of the
# 1000-digit `long_number` defined above.
window = 13
list_mults = []
# was: range(0, 988) — derive the window count from the data so the loop
# cannot silently truncate or overrun if the digit string changes length
for i in range(0, len(long_number) - window + 1):
    cumulative_mult = 1
    for j in long_number[i:i + window]:
        cumulative_mult = cumulative_mult * int(j)
    list_mults.append(cumulative_mult)
print(max(list_mults)) # 23514624000
639a14e502ea586296e65639c8a98f05735475db | 272 | py | Python | app/views.py | we-race-here/wrh-brac | 6dcd6bb23c86f98896127c89b24e6dc30af9648a | [
"MIT"
] | null | null | null | app/views.py | we-race-here/wrh-brac | 6dcd6bb23c86f98896127c89b24e6dc30af9648a | [
"MIT"
] | 9 | 2022-01-13T13:11:03.000Z | 2022-01-17T19:12:17.000Z | app/views.py | we-race-here/wrh-brac | 6dcd6bb23c86f98896127c89b24e6dc30af9648a | [
"MIT"
] | null | null | null | from django.shortcuts import render
# Create your views here.
from django.views.generic import TemplateView
from . import models, serializers
class HomeView(TemplateView):
    """Renders the 'Data.html' template; no extra context is supplied."""
    template_name = 'Data.html'
class FrontView(TemplateView):
    """Renders the 'index.html' template; no extra context is supplied."""
    template_name = 'index.html'
| 20.923077 | 45 | 0.775735 | 124 | 0.455882 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.176471 |
639b330d716e55cdf8a6ae33248c52fa4f302a3c | 755 | py | Python | perceptron/gen-data.py | KellyHwong/MIT-ML | 0305208956f89cb039292c7cce175852f0783336 | [
"MIT"
] | 15 | 2020-02-03T20:28:36.000Z | 2021-10-05T14:11:56.000Z | perceptron/gen-data.py | KellyHwong/MIT-ML | 0305208956f89cb039292c7cce175852f0783336 | [
"MIT"
] | null | null | null | perceptron/gen-data.py | KellyHwong/MIT-ML | 0305208956f89cb039292c7cce175852f0783336 | [
"MIT"
] | 10 | 2019-08-13T12:54:54.000Z | 2021-02-10T20:24:43.000Z | import numpy as np
import random
N = 10
def null(a, rtol=1e-5):
    """Return (rank, basis) where the columns of `basis` span the null space
    of `a`. Rank counts singular values above rtol * (largest singular value);
    the remaining right-singular vectors form the null-space basis.
    """
    _, sing_vals, vt = np.linalg.svd(a)
    rank = (sing_vals > rtol * sing_vals[0]).sum()
    return rank, vt[rank:].T.copy()
def gen_data(N, noisy=False):
    """Generate N random 2-D points in [-1, 1]^2 with labels from a random
    separating hyperplane (retried until no point lies exactly on it).

    Returns (X, y, w): points (2 x N), labels in {-1, +1} (via sign of the
    affine form), and the hyperplane coefficients from null().
    """
    lower, upper = -1, 1
    dim = 2
    X = np.random.rand(dim, N) * (upper - lower) + lower
    while True:
        # Random affine map: a row of ones stacked on random dim x dim points.
        Xsample = np.concatenate(
            (np.ones((1, dim)), np.random.rand(dim, dim) * (upper - lower) + lower))
        k, w = null(Xsample.T)
        y = np.sign(np.dot(w.T, np.concatenate((np.ones((1, N)), X))))
        if np.all(y):  # retry if any label is exactly 0 (point on the plane)
            break
    return (X, y, w)
def change_label(y):
    """Flip the sign of N // 10 randomly chosen labels (indices 1..N-1),
    mutating and returning `y` (adds label noise to a dataset).

    Bug fix: random.sample requires an integer sample size; `N/10` is a float
    under Python 3 and raised TypeError, so the count is now `N // 10`.
    """
    idx = random.sample(range(1, N), N // 10)
    y[idx] = -y[idx]
    return y
if __name__ == '__main__':
    # Quick manual check: generate a 10-sample dataset and print the points.
    X, y, w = gen_data(10)
    print(X)
| 18.414634 | 78 | 0.531126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.013245 |
639db7a427799c489966370121dcda273bf83fd4 | 4,431 | py | Python | forgot_password/tests/test_settings.py | oursky/forgot_password | 9afde8b9d39a2837676628f12c9b6f2c45da592a | [
"Apache-2.0"
] | 1 | 2017-02-09T10:17:50.000Z | 2017-02-09T10:17:50.000Z | forgot_password/tests/test_settings.py | oursky/forgot_password | 9afde8b9d39a2837676628f12c9b6f2c45da592a | [
"Apache-2.0"
] | 54 | 2016-09-07T11:01:32.000Z | 2020-02-12T06:15:43.000Z | forgot_password/tests/test_settings.py | oursky/forgot_password | 9afde8b9d39a2837676628f12c9b6f2c45da592a | [
"Apache-2.0"
] | 14 | 2016-09-20T05:36:49.000Z | 2019-04-02T15:42:37.000Z | # Copyright 2018 Oursky Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from unittest.mock import patch
from .. import get_verify_settings_parser, providers
class MockProvider1:
    """Test double for a verification provider: declares a single required
    'mock_api_key' setting for the parser to pick up."""

    @classmethod
    def configure_parser(cls, key, parser):
        # Mirrors the provider interface: register required settings.
        parser.add_setting('mock_api_key', atype=str, required=True)
class MockProvider2:
    """Second provider test double: declares a required 'mock_auth_token'
    setting, used to exercise multi-key configurations."""

    @classmethod
    def configure_parser(cls, key, parser):
        # Mirrors the provider interface: register required settings.
        parser.add_setting('mock_auth_token', atype=str, required=True)
# Environment fixture: a complete single-key ('phone') verification
# configuration, patched into os.environ for the single-key test.
SETTINGS_PHONE_MOCK = {
    'VERIFY_AUTO_UPDATE': 'true',
    'VERIFY_AUTO_SEND_SIGNUP': 'true',
    'VERIFY_AUTO_SEND_UPDATE': 'true',
    'VERIFY_REQUIRED': 'true',
    'VERIFY_CRITERIA': 'any',
    'VERIFY_ERROR_REDIRECT': 'http://example.com/error_redirect',
    'VERIFY_ERROR_HTML_URL': 'http://example.com/error_html_url',
    'VERIFY_KEYS': 'phone',
    'VERIFY_KEYS_PHONE_CODE_FORMAT': 'numeric',
    'VERIFY_KEYS_PHONE_SUCCESS_REDIRECT':
        'http://example.com/success_redirect',
    'VERIFY_KEYS_PHONE_ERROR_REDIRECT':
        'http://example.com/error_redirect',
    'VERIFY_KEYS_PHONE_SUCCESS_HTML_URL':
        'http://example.com/success_html_url',
    'VERIFY_KEYS_PHONE_ERROR_HTML_URL':
        'http://example.com/error_html_url',
    'VERIFY_KEYS_PHONE_PROVIDER': 'mock',
    'VERIFY_KEYS_PHONE_PROVIDER_MOCK_API_KEY': 'some-api-key',
}
# Environment fixture: a two-key ('phone' + 'email') configuration with
# distinct providers and expiries, for the multi-key test.
SETTINGS_PHONE_EMAIL_MOCK = {
    'VERIFY_KEYS': 'phone,email',
    'VERIFY_KEYS_PHONE_CODE_FORMAT': 'numeric',
    'VERIFY_KEYS_PHONE_EXPIRY': '60',
    'VERIFY_KEYS_PHONE_PROVIDER': 'mock1',
    'VERIFY_KEYS_PHONE_PROVIDER_MOCK_API_KEY': 'some-api-key',
    'VERIFY_KEYS_EMAIL_CODE_FORMAT': 'complex',
    'VERIFY_KEYS_EMAIL_EXPIRY': '30',
    'VERIFY_KEYS_EMAIL_PROVIDER': 'mock2',
    'VERIFY_KEYS_EMAIL_PROVIDER_MOCK_AUTH_TOKEN': 'some-auth-token',
}
class TestVerifySettingsParser(unittest.TestCase):
    """Settings-parser tests: the environment and the provider registry are
    both patched per-test, then parse_settings() output is asserted."""

    @patch.dict(os.environ, SETTINGS_PHONE_MOCK)
    @patch.dict(providers._providers, {'mock': MockProvider1}, clear=True)
    def test_single_key(self):
        """Single 'phone' key: all top-level and per-key settings parse."""
        parser = get_verify_settings_parser()
        ns = parser.parse_settings()
        # Top-level flags and criteria
        assert ns.auto_update
        assert ns.auto_send_signup
        assert ns.auto_send_update
        assert ns.required
        assert ns.criteria == 'any'
        assert ns.error_redirect == 'http://example.com/error_redirect'
        assert ns.error_html_url == 'http://example.com/error_html_url'
        # Per-key ('phone') settings
        assert set(ns.keys.keys()) == set(['phone'])
        assert ns.keys['phone'].code_format == 'numeric'
        assert ns.keys['phone'].success_redirect == \
            'http://example.com/success_redirect'
        assert ns.keys['phone'].error_redirect == \
            'http://example.com/error_redirect'
        assert ns.keys['phone'].success_html_url == \
            'http://example.com/success_html_url'
        assert ns.keys['phone'].error_html_url == \
            'http://example.com/error_html_url'
        # Provider resolution including provider-specific settings
        assert ns.keys['phone'].provider.name == 'mock'
        assert ns.keys['phone'].provider.mock_api_key == 'some-api-key'

    @patch.dict(os.environ, SETTINGS_PHONE_EMAIL_MOCK)
    @patch.dict(providers._providers,
                {'mock1': MockProvider1, 'mock2': MockProvider2},
                clear=True)
    def test_multiple_keys(self):
        """Two keys with distinct providers, formats and expiries."""
        parser = get_verify_settings_parser()
        ns = parser.parse_settings()
        assert set(ns.keys.keys()) == set(['phone', 'email'])
        assert ns.keys['phone'].code_format == 'numeric'
        assert ns.keys['phone'].expiry == 60
        assert ns.keys['phone'].provider.name == 'mock1'
        assert ns.keys['phone'].provider.mock_api_key == 'some-api-key'
        assert ns.keys['email'].code_format == 'complex'
        assert ns.keys['email'].expiry == 30
        assert ns.keys['email'].provider.name == 'mock2'
        assert ns.keys['email'].provider.mock_auth_token == 'some-auth-token'
| 39.918919 | 77 | 0.679982 | 2,379 | 0.536899 | 0 | 0 | 2,268 | 0.511848 | 0 | 0 | 2,084 | 0.470323 |
639f69e88bb9e0f52e4c78779eac5ba9a648e432 | 9,213 | py | Python | cosa/analyzers/bmc_temporal.py | zsisco/CoSA | b7a5107fcbae9b3ed3726fbcf9240b39252ef551 | [
"BSD-3-Clause"
] | 52 | 2018-02-26T19:01:03.000Z | 2022-02-24T08:30:00.000Z | cosa/analyzers/bmc_temporal.py | zsisco/CoSA | b7a5107fcbae9b3ed3726fbcf9240b39252ef551 | [
"BSD-3-Clause"
] | 28 | 2018-06-07T22:18:23.000Z | 2020-10-27T01:21:01.000Z | cosa/analyzers/bmc_temporal.py | zsisco/CoSA | b7a5107fcbae9b3ed3726fbcf9240b39252ef551 | [
"BSD-3-Clause"
] | 12 | 2018-08-16T20:02:46.000Z | 2021-01-20T18:17:45.000Z | # Copyright 2018 Cristian Mattarei
#
# Licensed under the modified BSD (3-clause BSD) License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import math
from pysmt.shortcuts import BV, And, Or, Solver, TRUE, FALSE, Not, EqualsOrIff, Implies, Iff, Symbol, BOOL, simplify, BVAdd, BVUGE
from pysmt.smtlib.printers import SmtPrinter, SmtDagPrinter
from pysmt.typing import BOOL
from cosa.utils.logger import Logger
from cosa.utils.formula_mngm import substitute, get_free_variables
from cosa.representation import TS, HTS
from cosa.encoders.coreir import CoreIRParser, SEP
from cosa.printers.template import HIDDEN_VAR
from cosa.analyzers.mcsolver import VerificationStrategy
from cosa.problem import VerificationStatus
from cosa.analyzers.mcsolver import TraceSolver, BMCSolver
NL = "\n"
EQVAR = HIDDEN_VAR+"eq_var"+HIDDEN_VAR[::-1]
HEQVAR = HIDDEN_VAR+"heq_var"+HIDDEN_VAR[::-1]
class BMCTemporal(BMCSolver):
hts = None
config = None
TraceID = 0
total_time = 0.0
def __init__(self, hts, config):
BMCSolver.__init__(self, hts, config)
def solve_liveness(self, hts, prop, k, k_min=0, eventually=False, lemmas=None):
if lemmas is not None:
(hts, res) = self.add_lemmas(hts, prop, lemmas)
if res:
Logger.log("Lemmas imply the property", 1)
Logger.log("", 0, not(Logger.level(1)))
return (0, True)
if self.config.incremental:
return self.solve_liveness_inc(hts, prop, k, k_min, eventually)
return self.solve_liveness_fwd(hts, prop, k)
def solve_liveness_inc(self, hts, prop, k, k_min, eventually=False):
if self.config.strategy in [VerificationStrategy.FWD, VerificationStrategy.AUTO]:
return self.solve_liveness_inc_fwd(hts, prop, k, k_min, eventually)
Logger.error("Invalid configuration strategy")
return None
def solve_liveness_inc_fwd(self, hts, prop, k, k_min, eventually=False):
self._reset_assertions(self.solver)
init = hts.single_init()
trans = hts.single_trans()
invar = hts.single_invar()
if self.config.simplify:
Logger.log("Simplifying the Transition System", 1)
if Logger.level(1):
timer = Logger.start_timer("Simplify")
init = simplify(init)
trans = simplify(trans)
invar = simplify(invar)
if Logger.level(1):
Logger.get_timer(timer)
heqvar = None
if not eventually:
heqvar = Symbol(HEQVAR, BOOL)
self._init_at_time(hts.vars.union(set([heqvar])), k)
if self.config.prove:
self.solver_klive = self.solver.copy("klive")
self._reset_assertions(self.solver_klive)
self._add_assertion(self.solver_klive, self.at_time(invar, 0))
if eventually:
self._add_assertion(self.solver_klive, self.at_time(init, 0))
propt = FALSE()
formula = And(init, invar)
formula = self.at_time(formula, 0)
Logger.log("Add init and invar", 2)
self._add_assertion(self.solver, formula)
next_prop = TS.has_next(prop)
if next_prop:
if k < 1:
Logger.error("Liveness checking with next variables requires at least k=1")
k_min = 1
t = 0
while (t < k+1):
self._push(self.solver)
loopback = FALSE()
if t > 0:
loopback = self.all_loopbacks(self.hts.vars, t, heqvar)
Logger.log("Add loopbacks at time %d"%t, 2)
self._add_assertion(self.solver, loopback)
if t >= k_min:
self._write_smt2_comment(self.solver, "Solving for k=%s"%(t))
Logger.log("\nSolving for k=%s"%(t), 1)
if self._solve(self.solver):
Logger.log("Counterexample found with k=%s"%(t), 1)
model = self._get_model(self.solver)
return (t, model)
else:
Logger.log("No counterexample found with k=%s"%(t), 1)
Logger.msg(".", 0, not(Logger.level(1)))
else:
Logger.log("Skipping solving for k=%s (k_min=%s)"%(t,k_min), 1)
Logger.msg(".", 0, not(Logger.level(1)))
self._pop(self.solver)
n_prop = Not(prop)
if not eventually:
n_prop = Or(n_prop, Not(heqvar))
if next_prop:
if t > 0:
propt = self.at_time(n_prop, t-1)
else:
propt = self.at_time(n_prop, t)
self._add_assertion(self.solver, propt)
if self.config.prove:
if t > 0:
self._add_assertion(self.solver_klive, trans_t)
self._write_smt2_comment(self.solver_klive, "Solving for k=%s"%(t))
if next_prop:
if t > 0:
propt = self.at_time(Not(prop), t-1)
else:
propt = self.at_time(Not(prop), t)
self._add_assertion(self.solver_klive, propt)
if t >= k_min:
if self._solve(self.solver_klive):
Logger.log("K-Liveness failed with k=%s"%(t), 1)
else:
Logger.log("K-Liveness holds with k=%s"%(t), 1)
return (t, True)
else:
self._add_assertion(self.solver_klive, self.at_time(Not(prop), 0))
# self._push(self.solver_klive)
# self._add_assertion(self.solver_klive, self.at_time(prop, 0))
# res = self._solve(self.solver_klive)
# self._pop(self.solver_klive)
# if res:
# self._add_assertion(self.solver_klive, self.at_time(prop, 0))
# else:
# self._add_assertion(self.solver_klive, self.at_time(Not(prop), 0))
trans_t = self.unroll(trans, invar, t+1, t)
self._add_assertion(self.solver, trans_t)
if self.assert_property:
prop_t = self.unroll(TRUE(), prop, t, t-1)
self._add_assertion(self.solver, prop_t)
Logger.log("Add property at time %d"%t, 2)
t += 1
return (t-1, None)
    def all_loopbacks(self, vars, k, heqvar=None):
        """Build a formula asserting that state ``k`` equals some earlier state.

        Args:
            vars: iterable of (untimed) state variables.
            k: the final time step to compare against.
            heqvar: optional "history" variable; when given, auxiliary
                EQVAR/heqvar timed variables are constrained so that
                heqvar@i holds iff some loopback equality eqvar@j holds
                for j <= i (used by the eventually/liveness encodings).

        Returns:
            A pysmt formula: OR over i < k of "state(i) == state(k)",
            conjoined with the heqvar bookkeeping constraints if requested.
        """
        lvars = list(vars)
        # Timed copies of every state variable at the final step k.
        vars_k = [TS.get_timed(v, k) for v in lvars]
        loopback = FALSE()
        eqvar = None
        heqvars = None
        if heqvar is not None:
            eqvar = Symbol(EQVAR, BOOL)
            heqvars = []
            # peqvars accumulates "some eqvar@j holds for j <= i".
            peqvars = FALSE()
        for i in range(k):
            vars_i = [TS.get_timed(v, i) for v in lvars]
            # Full state equality between time k and time i.
            eq_k_i = And([EqualsOrIff(vars_k[j], vars_i[j]) for j in range(len(lvars))])
            if heqvar is not None:
                eqvar_i = TS.get_timed(eqvar, i)
                peqvars = Or(peqvars, eqvar_i)
                # Assert eqvar@i and tie it to the state equality at i.
                eq_k_i = And(eqvar_i, Iff(eqvar_i, eq_k_i))
                # heqvar@i <-> (eqvar@0 | ... | eqvar@i)
                heqvars.append(Iff(TS.get_timed(heqvar, i), peqvars))
            loopback = Or(loopback, eq_k_i)
        if heqvar is not None:
            loopback = And(loopback, And(heqvars))
        return loopback
def liveness(self, prop, k, k_min):
lemmas = self.hts.lemmas
self._init_at_time(self.hts.vars, k)
(t, model) = self.solve_liveness(self.hts, prop, k, k_min, False, lemmas)
model = self._remap_model(self.hts.vars, model, t)
if model == True:
return (VerificationStatus.TRUE, None, t)
elif model is not None:
trace = self.generate_trace(model, t, get_free_variables(prop), find_loop=True)
return (VerificationStatus.FALSE, trace, t)
else:
return (VerificationStatus.UNK, None, t)
def eventually(self, prop, k, k_min):
lemmas = self.hts.lemmas
self._init_at_time(self.hts.vars, k)
(t, model) = self.solve_liveness(self.hts, prop, k, k_min, True, lemmas)
model = self._remap_model(self.hts.vars, model, t)
if model == True:
return (VerificationStatus.TRUE, None, t)
elif model is not None:
trace = self.generate_trace(model, t, get_free_variables(prop), find_loop=True)
return (VerificationStatus.FALSE, trace, t)
else:
return (VerificationStatus.UNK, None, t)
| 35.709302 | 130 | 0.551069 | 8,036 | 0.872246 | 0 | 0 | 0 | 0 | 0 | 0 | 1,211 | 0.131445 |
63a0482e773f0b1712ad551a2e29a76da9779874 | 6,767 | py | Python | sdk/python/pulumi_azure/blueprint/get_published_version.py | aangelisc/pulumi-azure | 71dd9c75403146e16f7480e5a60b08bc0329660e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/blueprint/get_published_version.py | aangelisc/pulumi-azure | 71dd9c75403146e16f7480e5a60b08bc0329660e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/blueprint/get_published_version.py | aangelisc/pulumi-azure | 71dd9c75403146e16f7480e5a60b08bc0329660e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetPublishedVersionResult',
'AwaitableGetPublishedVersionResult',
'get_published_version',
]
@pulumi.output_type
class GetPublishedVersionResult:
    """
    A collection of values returned by getPublishedVersion.
    """
    def __init__(__self__, blueprint_name=None, description=None, display_name=None, id=None, last_modified=None, scope_id=None, target_scope=None, time_created=None, type=None, version=None):
        # Every field is an optional string. Validate and register each one
        # in a single loop instead of repeating the same three statements
        # ten times (the error messages are byte-identical to the originals).
        for _key, _value in (
            ("blueprint_name", blueprint_name),
            ("description", description),
            ("display_name", display_name),
            ("id", id),
            ("last_modified", last_modified),
            ("scope_id", scope_id),
            ("target_scope", target_scope),
            ("time_created", time_created),
            ("type", type),
            ("version", version),
        ):
            if _value and not isinstance(_value, str):
                raise TypeError("Expected argument '%s' to be a str" % _key)
            pulumi.set(__self__, _key, _value)
    @property
    @pulumi.getter(name="blueprintName")
    def blueprint_name(self) -> str:
        return pulumi.get(self, "blueprint_name")
    @property
    @pulumi.getter
    def description(self) -> str:
        """
        The description of the Blueprint Published Version
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        The display name of the Blueprint Published Version
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="lastModified")
    def last_modified(self) -> str:
        return pulumi.get(self, "last_modified")
    @property
    @pulumi.getter(name="scopeId")
    def scope_id(self) -> str:
        return pulumi.get(self, "scope_id")
    @property
    @pulumi.getter(name="targetScope")
    def target_scope(self) -> str:
        """
        The target scope
        """
        return pulumi.get(self, "target_scope")
    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> str:
        return pulumi.get(self, "time_created")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the Blueprint
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def version(self) -> str:
        return pulumi.get(self, "version")
class AwaitableGetPublishedVersionResult(GetPublishedVersionResult):
    """Awaitable wrapper so the result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns __await__ into a generator, which
        # the awaitable protocol requires; it immediately returns the plain
        # (non-awaitable) result object.
        if False:
            yield self
        return GetPublishedVersionResult(
            blueprint_name=self.blueprint_name,
            description=self.description,
            display_name=self.display_name,
            id=self.id,
            last_modified=self.last_modified,
            scope_id=self.scope_id,
            target_scope=self.target_scope,
            time_created=self.time_created,
            type=self.type,
            version=self.version)
def get_published_version(blueprint_name: Optional[str] = None,
                          scope_id: Optional[str] = None,
                          version: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPublishedVersionResult:
    """
    Use this data source to access information about an existing Blueprint Published Version

    > **NOTE:** Azure Blueprints are in Preview and potentially subject to breaking change without notice.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_azure as azure

    current = azure.core.get_subscription()
    test = azure.blueprint.get_published_version(scope_id=current.id,
        blueprint_name="exampleBluePrint",
        version="dev_v2.3")
    ```

    :param str blueprint_name: The name of the Blueprint Definition
    :param str scope_id: The ID of the Management Group / Subscription where this Blueprint Definition is stored.
    :param str version: The Version name of the Published Version of the Blueprint Definition
    """
    # Arguments for the provider invoke, keyed by the wire (camelCase) names.
    invoke_args = {
        'blueprintName': blueprint_name,
        'scopeId': scope_id,
        'version': version,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    outputs = pulumi.runtime.invoke('azure:blueprint/getPublishedVersion:getPublishedVersion', invoke_args, opts=opts, typ=GetPublishedVersionResult).value
    # Re-wrap the plain result into its awaitable counterpart.
    return AwaitableGetPublishedVersionResult(
        blueprint_name=outputs.blueprint_name,
        description=outputs.description,
        display_name=outputs.display_name,
        id=outputs.id,
        last_modified=outputs.last_modified,
        scope_id=outputs.scope_id,
        target_scope=outputs.target_scope,
        time_created=outputs.time_created,
        type=outputs.type,
        version=outputs.version)
| 36.578378 | 192 | 0.661741 | 4,368 | 0.645485 | 493 | 0.072854 | 3,780 | 0.558593 | 0 | 0 | 2,299 | 0.339737 |
63a05e17f1457914002ddf440d5d635997035ff1 | 2,952 | py | Python | basetrainer/scheduler/MultiStepLR.py | PanJinquan/pytorch-base-trainer | 37799c948f72b2f9d3771ff469e06cdbff4a1d07 | [
"MIT"
] | 11 | 2022-01-18T10:07:52.000Z | 2022-03-16T02:40:31.000Z | basetrainer/scheduler/MultiStepLR.py | PanJinquan/pytorch-base-trainer | 37799c948f72b2f9d3771ff469e06cdbff4a1d07 | [
"MIT"
] | null | null | null | basetrainer/scheduler/MultiStepLR.py | PanJinquan/pytorch-base-trainer | 37799c948f72b2f9d3771ff469e06cdbff4a1d07 | [
"MIT"
] | 1 | 2022-01-26T06:31:29.000Z | 2022-01-26T06:31:29.000Z | # -*-coding: utf-8 -*-
"""
@Author : panjq
@E-mail : pan_jinquan@163.com
@Date : 2021-07-28 15:32:44
"""
import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
from .WarmUpLR import WarmUpLR
from ..callbacks.callbacks import Callback
class MultiStepLR(Callback):
    """Multi-step (milestone-based) learning-rate decay with optional warm-up.

    The learning rate starts at ``lr_init`` and is multiplied by ``gamma``
    each time an epoch milestone is passed. Warm-up is delegated to
    :class:`WarmUpLR` and runs per step.
    """
    def __init__(self,
                 optimizer,
                 epochs,
                 num_steps,
                 milestones,
                 lr_init=0.01,
                 num_warn_up=0,
                 gamma=0.1):
        """A multi-step decay scheduler driven by epoch milestones.

        :param optimizer: e.g. optim.SGD
        :param epochs: total number of training epochs
        :param num_steps: iterations per epoch, i.e. len(train_dataloader)
        :param milestones: (list): List of epoch indices. Must be increasing.
        :param lr_init: the initial (maximum) learning rate.
        :param num_warn_up: number of warm-up epochs handled by WarmUpLR
        :param gamma: (float): Multiplicative factor of learning rate decay. Default: 0.1.
        """
        self.optimizer = optimizer
        self.epochs = epochs
        self.num_steps = num_steps
        self.max_step = epochs * self.num_steps
        self.lr_init = lr_init
        # Sort a copy: the original stored the caller's list and sorted it
        # in place, silently mutating the caller's argument.
        self.milestones = sorted(milestones)
        # One lr per decay stage: lr_init, lr_init*gamma, lr_init*gamma**2, ...
        self.lr_list = [lr_init * gamma ** decay for decay in range(0, len(self.milestones) + 1)]
        self.epoch = 0
        self.warm_up = WarmUpLR(optimizer,
                                num_steps=self.num_steps,
                                lr_init=lr_init,
                                num_warn_up=num_warn_up)
        super(MultiStepLR, self).__init__()
    def get_lr(self, epoch, milestones, lr_list):
        """Return the learning rate for ``epoch``.

        Picks lr_list[i] for the first milestone with epoch < milestones[i];
        once every milestone has been passed, the last lr applies.
        """
        lr = self.optimizer.param_groups[0]["lr"]
        max_stages = milestones[-1]
        if epoch < max_stages:
            for index in range(len(milestones)):
                if epoch < milestones[index]:
                    lr = lr_list[index]
                    break
        elif epoch >= max_stages:
            lr = lr_list[-1]
        return lr
    def set_lr(self, lr):
        """Apply ``lr`` to every parameter group of the optimizer."""
        for param_group in self.optimizer.param_groups:
            param_group["lr"] = lr
    def set_milestones_lr(self, epoch, milestones, lr_list):
        """Look up and apply the milestone learning rate for ``epoch``.

        :param epoch: current epoch index
        :param milestones: e.g. [35, 65, 95, 150]
        :param lr_list: e.g. [0.1, 0.01, 0.001, 0.0001, 0.00001]
        """
        lr = self.get_lr(epoch, milestones, lr_list)
        self.set_lr(lr)
    def on_epoch_begin(self, epoch, logs: dict = {}):
        # Remember the epoch for per-step warm-up updates in on_batch_end.
        self.epoch = epoch
        self.set_milestones_lr(epoch, self.milestones, self.lr_list)
    def on_batch_end(self, batch, logs: dict = {}):
        self.step(epoch=self.epoch, step=batch)
    def step(self, epoch=0, step=0):
        # step() runs every iteration and is latency sensitive, so all
        # step-independent work lives in on_epoch_begin; only warm-up needs
        # a per-step update.
        self.warm_up.step(epoch, step)
| 33.545455 | 97 | 0.572493 | 2,739 | 0.907555 | 0 | 0 | 0 | 0 | 0 | 0 | 952 | 0.315441 |
63a0b6c965482e6900cbc411399d6b33e24bcb3d | 1,232 | py | Python | common/inspect.py | skying0527/pull-Demo | 5744e31e22ca27d43e04bfbce37e7af251a0fe76 | [
"Apache-2.0"
] | null | null | null | common/inspect.py | skying0527/pull-Demo | 5744e31e22ca27d43e04bfbce37e7af251a0fe76 | [
"Apache-2.0"
] | null | null | null | common/inspect.py | skying0527/pull-Demo | 5744e31e22ca27d43e04bfbce37e7af251a0fe76 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import os
import yaml
from tools.times import timestamp
from config.conf import ELEMENT_PATH, LOCATE_MODE
def inspect_element():
    """Validate every element locator defined under ELEMENT_PATH.

    Walks each YAML file in ELEMENT_PATH and checks that every entry has the
    form "<pattern>==<value>" with a supported pattern (one of LOCATE_MODE)
    and a value consistent with that pattern.

    Raises:
        AttributeError: if a locator does not declare a supported pattern.
        AssertionError: if a locator's value does not match its pattern.
    """
    start_time = timestamp()
    for i in os.listdir(ELEMENT_PATH):
        _path = os.path.join(ELEMENT_PATH, i)
        if not os.path.isfile(_path):
            continue
        with open(_path, encoding='utf-8') as f:
            data = yaml.safe_load(f)
        # Guard against empty YAML files: safe_load returns None for them,
        # which previously crashed on data.values().
        if not data:
            continue
        for k in data.values():
            pattern, value = k.split('==')
            if pattern not in LOCATE_MODE:
                raise AttributeError('【%s】路径中【%s]元素没有指定类型' % (i, k))
            if pattern == 'xpath':
                assert '//' in value, '【%s】路径中【%s]元素xpath类型与值不配' % (
                    i, k)
            if pattern == 'css':
                assert '//' not in value, '【%s】路径中【%s]元素css类型与值不配' % (
                    i, k)
            if pattern in ('id', 'name', 'class'):
                assert value, '【%s】路径中【%s]元素类型与值不匹配' % (i, k)
    end_time = timestamp()
    print("校验元素done!用时%.3f秒!" % (end_time - start_time))
# Run the validation when executed as a script.
if __name__ == '__main__':
    inspect_element()
| 35.2 | 78 | 0.491071 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 385 | 0.277778 |
63a0d5586584f64ab1bee33fc68bb0c0fc21fe84 | 7,201 | py | Python | client/starwhale/swds/store.py | goldenxinxing/starwhale | 2fbb72b0ce5e135b432120c779440e53942be3b5 | [
"Apache-2.0"
] | 1 | 2022-03-24T02:03:48.000Z | 2022-03-24T02:03:48.000Z | client/starwhale/swds/store.py | goldenxinxing/starwhale | 2fbb72b0ce5e135b432120c779440e53942be3b5 | [
"Apache-2.0"
] | null | null | null | client/starwhale/swds/store.py | goldenxinxing/starwhale | 2fbb72b0ce5e135b432120c779440e53942be3b5 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
import sys
import yaml
import typing as t
import click
import requests
from rich.panel import Panel
from rich.pretty import Pretty
from fs import open_fs
from starwhale.base.store import LocalStorage
from starwhale.consts import (
DEFAULT_DATASET_YAML_NAME,
DEFAULT_MANIFEST_NAME,
SHORT_VERSION_CNT,
SW_API_VERSION,
)
from starwhale.utils.http import wrap_sw_error_resp, upload_file
from starwhale.utils.fs import empty_dir
from starwhale.utils import pretty_bytes
from starwhale.utils.error import NotFoundError
# TODO: refactor Dataset and ModelPackage LocalStorage
class _UploadPhase:
    """Phase markers sent with each multipart upload request (see push())."""
    MANIFEST = "MANIFEST"  # first request: send the manifest, obtain an upload id
    BLOB = "BLOB"  # one request per data/meta blob
    END = "END"  # all blobs uploaded successfully
    CANCEL = "CANCEL"  # abort the upload session after a failure
class DataSetLocalStore(LocalStorage):
    """Local storage backend for starwhale datasets (swds).

    Dataset versions live under ``self.dataset_dir/<name>/<version>/`` and
    can be listed, inspected, pushed to a remote controller, or deleted.
    """
    def list(
        self,
        filter: str = "",
        title: str = "",
        caption: str = "",
        fullname: bool = False,
    ) -> None:
        """Print the table of locally stored datasets.

        ``title`` and ``caption`` belong to the base signature but are
        overridden with dataset-specific values in the super() call.
        """
        super().list(
            filter=filter,
            title="List dataset(swds) in local storage",
            caption=f"@{self.dataset_dir}",
            fullname=fullname,
        )
    def iter_local_swobj(self) -> t.Generator[LocalStorage.SWobjMeta, None, None]:
        """Yield metadata for each complete dataset version found on disk."""
        # Lazy import - presumably avoids an import cycle with .dataset; TODO confirm.
        from .dataset import ARCHIVE_SWDS_META
        if not self.dataset_dir.exists():
            return
        _fs = open_fs(str(self.dataset_dir.resolve()))
        # Directory layout: <dataset_dir>/<name>/<version>/
        for name_dir in _fs.scandir("."):
            if not name_dir.is_dir:
                continue
            for ver_dir in _fs.opendir(name_dir.name).scandir("."):
                # TODO: add more validator
                if not ver_dir.is_dir:
                    continue
                _path = self.dataset_dir / name_dir.name / ver_dir.name
                # A version only counts when the manifest, the dataset yaml
                # and the archived meta file are all present.
                if not all(
                    [
                        (_path / n).exists()
                        for n in (
                            DEFAULT_MANIFEST_NAME,
                            DEFAULT_DATASET_YAML_NAME,
                            ARCHIVE_SWDS_META,
                        )
                    ]
                ):
                    continue
                with (_path / DEFAULT_MANIFEST_NAME).open("r") as f:
                    _manifest = yaml.safe_load(f)
                # TODO: support dataset tag cmd
                # Tag defaults to the shortened version string.
                _tag = ver_dir.name[:SHORT_VERSION_CNT]
                yield LocalStorage.SWobjMeta(
                    name=name_dir.name,
                    version=ver_dir.name,
                    tag=_tag,
                    environment=_manifest["dep"]["env"],
                    size=pretty_bytes(_manifest.get("dataset_byte_size", 0)),
                    generate="",
                    created=_manifest["created_at"],
                )
    def push(self, sw_name: str, project: str = "", force: bool = False) -> None:
        """Upload a local dataset version to the remote controller.

        Protocol (see _UploadPhase): send the manifest first to obtain an
        upload id, then upload each blob, then finish with END - or CANCEL
        the session if any blob upload fails.
        """
        url = f"{self.sw_remote_addr}/api/{SW_API_VERSION}/project/dataset/push"
        _name, _version = self._parse_swobj(sw_name)
        _dir, _ = self._guess(self.dataset_dir / _name, _version)
        if not _dir.exists():
            self._console.print(
                f"[red]failed to push {sw_name}[/], because of {_dir} not found"
            )
            sys.exit(1)
        # TODO: refer to docker push
        self._console.print(" :fire: try to push swds...")
        _manifest_path = _dir / DEFAULT_MANIFEST_NAME
        # The upload session is identified by "<name>:<version dir>".
        _swds = f"{_name}:{_dir.name}"
        _headers = {"Authorization": self._sw_token}
        # TODO: use rich progress
        # Phase 1: manifest upload; the response carries the session id.
        r = upload_file(
            url=url,
            fpath=_manifest_path,
            fields={
                "swds": _swds,
                "phase": _UploadPhase.MANIFEST,
                "project": project,
                "force": "1" if force else "0",
            },
            headers=_headers,
            exit=True,
        )
        self._console.print(f"\t :arrow_up: {DEFAULT_MANIFEST_NAME} :ok:")
        upload_id = r.json().get("data", {}).get("upload_id")
        if not upload_id:
            raise Exception("get invalid upload_id")
        _headers["X-SW-UPLOAD-ID"] = upload_id
        _manifest = yaml.safe_load(_manifest_path.open())
        # TODO: add retry deco
        def _upload_blob(_fp: Path) -> None:
            # Phase 2: upload one blob inside the session (raises on failure).
            if not _fp.exists():
                raise NotFoundError(f"{_fp} not found")
            upload_file(
                url=url,
                fpath=_fp,
                fields={
                    "swds": _swds,
                    "phase": _UploadPhase.BLOB,
                },
                headers=_headers,
                use_raise=True,
            )
            self._console.print(f"\t :arrow_up: {_fp.name} :ok:")
        # TODO: parallel upload
        try:
            from .dataset import ARCHIVE_SWDS_META
            # Every signed data blob plus the archived meta file.
            for p in [_dir / "data" / n for n in _manifest["signature"]] + [
                _dir / ARCHIVE_SWDS_META
            ]:
                _upload_blob(p)
        except Exception as e:
            # NOTE(review): message lacks a space before the exception text.
            self._console.print(
                f"when upload blobs, we meet Exception{e}, will cancel upload"
            )
            r = requests.post(
                url,
                data={"swds": _swds, "project": project, "phase": _UploadPhase.CANCEL},
                headers=_headers,
            )
            wrap_sw_error_resp(r, "cancel", use_raise=True)
        else:
            self._console.print(" :clap: :clap: all uploaded.")
            r = requests.post(
                url,
                data={"swds": _swds, "project": project, "phase": _UploadPhase.END},
                headers=_headers,
            )
            wrap_sw_error_resp(r, "end", use_raise=True)
    def pull(self, sw_name: str) -> None:
        """Not implemented yet."""
        ...
    def info(self, sw_name: str) -> None:
        """Pretty-print the merged manifest/dataset info of a local version."""
        _manifest = self._do_get_info(*self._parse_swobj(sw_name))
        _config_panel = Panel(
            Pretty(_manifest, expand_all=True),
            title="inspect _manifest.yaml and dataset.yaml info",
        )
        self._console.print(_config_panel)
        # TODO: show dataset dir tree view
    def _do_get_info(self, _name: str, _version: str) -> t.Dict[t.Any, t.Any]:
        """Merge _manifest.yaml and dataset.yaml of one version into a dict.

        Raises:
            NotFoundError: if the version directory does not exist.
        """
        _dir, _ = self._guess(self.dataset_dir / _name, _version)
        if not _dir.exists():
            raise NotFoundError(f"{_dir} is not existed")
        _manifest: t.Dict[str, t.Any] = {}
        _manifest = yaml.safe_load((_dir / DEFAULT_MANIFEST_NAME).open())
        _dataset = yaml.safe_load((_dir / DEFAULT_DATASET_YAML_NAME).open())
        # dataset.yaml values take precedence on key clashes.
        _manifest.update(_dataset)
        _manifest["dataset_dir"] = str(_dir.resolve())
        return _manifest
    def delete(self, sw_name: str) -> None:
        """Delete a local dataset version after interactive confirmation."""
        _name, _version = self._parse_swobj(sw_name)
        _dir, _ = self._guess(self.dataset_dir / _name, _version)
        if _dir.exists() and _dir.is_dir():
            # click aborts (raises) here unless the user confirms.
            click.confirm(f"continue to delete {_dir}?", abort=True)
            empty_dir(_dir)
            # NOTE(review): ":bomb" is missing the closing colon of the emoji markup.
            self._console.print(f":bomb delete dataset dir: {_dir}")
        else:
            self._console.print(
                f":diving_mask: not found or no dir for {_dir}, skip to delete it"
            )
    def gc(self, dry_run: bool = False) -> None:
        """Garbage-collect leftover dataset artifacts (not implemented yet)."""
        # TODO: remove intermediated dataset dir
        ...
| 33.337963 | 87 | 0.539786 | 6,582 | 0.91404 | 1,600 | 0.222191 | 0 | 0 | 0 | 0 | 1,205 | 0.167338 |
63a2bc2fc9589a6a626eb31088dda639a15a9368 | 2,578 | py | Python | stx-metrics/footprint/tests/send_data_test.py | gaponcec/tools-contrib | 3e0d14040eec54de969dee22919c4d54c4d7c630 | [
"Apache-2.0"
] | 1 | 2019-03-25T19:21:57.000Z | 2019-03-25T19:21:57.000Z | stx-metrics/footprint/tests/send_data_test.py | gaponcec/tools-contrib | 3e0d14040eec54de969dee22919c4d54c4d7c630 | [
"Apache-2.0"
] | 3 | 2019-04-03T01:45:24.000Z | 2019-07-25T15:22:31.000Z | stx-metrics/footprint/tests/send_data_test.py | gaponcec/tools-contrib | 3e0d14040eec54de969dee22919c4d54c4d7c630 | [
"Apache-2.0"
] | 7 | 2019-03-25T18:53:44.000Z | 2020-02-18T09:17:03.000Z | #!/usr/bin/env python
__author__ = "Mario Carrillo"
import random
import time
import argparse
from influxdb import InfluxDBClient
INFLUX_SERVER = ""
INFLUX_PORT = ""
INFLUX_PASS = ""
INFLUX_USER = ""
def send_data(json_file):
    """Write the given points to the 'starlingx' InfluxDB database.

    Returns the client so the caller can run follow-up queries.
    """
    client = InfluxDBClient(
        INFLUX_SERVER, INFLUX_PORT, INFLUX_USER, INFLUX_PASS, 'starlingx')
    outcome = (
        "Data inserted successfully"
        if client.write_points(json_file)
        else "Error during data insertion"
    )
    print(outcome)
    return client
def check_data(client, table):
    """Query and display every value currently stored in ``table``."""
    result = client.query("select value from %s;" % table)
    print("%s contains:" % table)
    print(result)
def main():
    """Parse CLI options, then insert two random test points into InfluxDB.

    Nothing is sent unless server, port, user and password are all provided.
    """
    global INFLUX_SERVER
    global INFLUX_PORT
    global INFLUX_PASS
    global INFLUX_USER
    parser = argparse.ArgumentParser()
    parser.add_argument('--server',\
        help='address of the influxdb server')
    parser.add_argument('--port',\
        help='port of the influxdb server')
    parser.add_argument('--user',\
        help='user of the influxdb server')
    parser.add_argument('--password',\
        help='password of the influxdb server')
    args = parser.parse_args()
    if args.server:
        INFLUX_SERVER = args.server
    if args.port:
        INFLUX_PORT = args.port
    if args.password:
        INFLUX_PASS = args.password
    if args.user:
        # Fixed: this previously assigned args.password, discarding --user.
        INFLUX_USER = args.user
    # Table information
    table = "vm_metrics"
    test_units = "ms"
    def _build_point(test_name):
        # One InfluxDB point: current wall-clock time plus a random value
        # in [0.1, 10), rounded to two decimals.
        return [
            {
                "measurement": table,
                "time": time.strftime("%c"),
                "fields": {
                    "test": test_name,
                    "unit": test_units,
                    "value": round(random.uniform(0.1, 10), 2),
                },
            }
        ]
    if INFLUX_SERVER and INFLUX_PORT and INFLUX_PASS and INFLUX_USER:
        client = send_data(_build_point("vm_boottime"))
        check_data(client, table)
        # Space the two points apart so they get distinct timestamps.
        time.sleep(10)
        client = send_data(_build_point("vm_boottime_2"))
        check_data(client, table)
# Run the demo when executed as a script.
if __name__ == '__main__':
    main()
| 24.320755 | 70 | 0.572149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 504 | 0.1955 |
63a41385fdcc3fd156087362665083c0f3d04e78 | 1,767 | py | Python | roll_the_dice/cli.py | pluralsight/tech-blog-roll-the-dice | 68ec0aa01f6becaca1ab72e8aff1141e8f253eaa | [
"Apache-2.0"
] | 11 | 2020-02-20T22:33:39.000Z | 2022-03-23T11:57:50.000Z | roll_the_dice/cli.py | pluralsight/tech-blog-roll-the-dice | 68ec0aa01f6becaca1ab72e8aff1141e8f253eaa | [
"Apache-2.0"
] | null | null | null | roll_the_dice/cli.py | pluralsight/tech-blog-roll-the-dice | 68ec0aa01f6becaca1ab72e8aff1141e8f253eaa | [
"Apache-2.0"
] | 6 | 2020-04-01T01:41:37.000Z | 2021-11-17T11:14:53.000Z | import typer
from .dice import roll, roll_from_string
app = typer.Typer()
@app.command("hello")
def hello_world():
"""our first CLI with typer!
"""
typer.echo("Opening blog post...")
typer.launch(
"https://pluralsight.com/tech-blog/python-cli-utilities-with-poetry-and-typer"
)
@app.command("roll-str")
def roll_string(
dice_str: str,
rolls: bool = typer.Option(
False, help="set to display individual rolls", show_default=True
),
):
"""Rolls the dice from a formatted string.
We supply a formatted string DICE_STR describing the roll, e.g. '2D6'
for two six-sided dice.
"""
try:
rolls_list, total, formatted_roll = roll_from_string(dice_str)
except ValueError:
typer.echo(f"invalid roll string: {dice_str}")
raise typer.Exit(code=1)
typer.echo(f"rolling {formatted_roll}!\n")
typer.echo(f"your roll: {total}\n")
if rolls:
typer.echo(f"made up of {rolls_list}\n")
@app.command("roll-num")
def roll_num(
num_dice: int = typer.Option(
1, "-n", "--num-dice", help="number of dice to roll", show_default=True, min=1
),
sides: int = typer.Option(
20, "-d", "--sides", help="number-sided dice to roll", show_default=True, min=1
),
rolls: bool = typer.Option(
False, help="set to display individual rolls", show_default=True
),
):
"""Rolls the dice from numeric inputs.
We supply the number and side-count of dice to roll with option arguments.
"""
rolls_list, total = roll(num_dice=num_dice, sides=sides)
typer.echo(f"rolling {num_dice}D{sides}!\n")
typer.echo(f"your roll: {total}\n")
if rolls:
typer.echo(f"made up of {rolls_list}\n")
def main():
app()
| 25.985294 | 87 | 0.634975 | 0 | 0 | 0 | 0 | 1,658 | 0.938314 | 0 | 0 | 786 | 0.444822 |
63a60dfdf997ba92340c90aeb0a57c1998260210 | 2,311 | py | Python | scripts/dfh.py | ldamewood/figures | 5a6ee60d308f1726afe491469db3ec6cde9e6d5c | [
"MIT"
] | 5 | 2015-03-06T07:49:43.000Z | 2022-03-22T19:52:42.000Z | scripts/dfh.py | ldamewood/figures | 5a6ee60d308f1726afe491469db3ec6cde9e6d5c | [
"MIT"
] | null | null | null | scripts/dfh.py | ldamewood/figures | 5a6ee60d308f1726afe491469db3ec6cde9e6d5c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import numpy
import itertools
from pymatgen.core.lattice import Lattice
from pymatgen.core.operations import SymmOp
from pymatgen.core.structure import Structure
from crystal import fillcell, tikz_atoms
def dfh(single = True, defect = False):
    """Build a Si supercell with Mn substituted on Si sites near z=0.5,
    optionally with one point defect.

    :param single: if True, build the small 1x1x8 cell; otherwise 2x2x8.
    :param defect: 0/False for no defect; 1 puts Si back on one Mn site,
        2 removes one Mn entirely, 3 puts Mn on one extra Si site.
        Any defect forces the larger (2x2x8) cell.
    :return: the pymatgen Structure.
    """
    if defect:
        single = False
    # Si lattice constant (Angstrom) and the fcc primitive cell.
    a = 5.43
    fcc = Lattice([[a/2,a/2,0],[a/2,0,a/2],[0,a/2,a/2]])
    dfh = Structure(fcc,['Si']*2,[[0.00,0.00,0.00],[0.25,0.25,0.25]])
    # Make the orthogonal cubic
    dfh.make_supercell([[0,0,1],[1,-1,0],[1,1,-1]])
    # Rotate the cell
    rt = 0.70710678118654746  # 1/sqrt(2)
    symmop = SymmOp.from_rotation_and_translation([[0,rt,rt],[0,rt,-rt],[1,0,0]])
    dfh.apply_operation(symmop)
    # Make supercell
    if single == True:
        dfh.make_supercell([1,1,8])
    else:
        dfh.make_supercell([2,2,8])
    # Insert Mn atoms
    # NOTE(review): the structure is appended to / deleted from while being
    # enumerated; this appears to work as intended but is fragile - TODO confirm.
    for i,atom in enumerate(dfh):
        if abs(atom.frac_coords[2] - 0.5) < 0.01 and atom.specie.symbol == 'Si':
            dfh.append('Mn',atom.frac_coords)
            del dfh[i]
    # Do defects
    if defect == 1:
        # Replace one Mn with Si (substitution back to silicon).
        defectMn = numpy.array([0,0,0.5])
        for i,atom in enumerate(dfh):
            if numpy.linalg.norm(atom.frac_coords - defectMn) < 0.01 and atom.specie.symbol == 'Mn':
                dfh.append('Si',defectMn)
                del dfh[i]
    if defect == 2:
        # Remove one Mn entirely (vacancy).
        defectMn = numpy.array([0.5,0.5,0.5])
        for i,atom in enumerate(dfh):
            if numpy.linalg.norm(atom.frac_coords - defectMn) < 0.01 and atom.specie.symbol == 'Mn':
                del dfh[i]
    if defect == 3:
        # Replace one Si (slightly below the Mn plane) with Mn.
        defectMn = numpy.array([0.5,0.25,0.5-1./32])
        for i,atom in enumerate(dfh):
            if numpy.linalg.norm(atom.frac_coords - defectMn) < 0.01 and atom.specie.symbol == 'Si':
                dfh.append('Mn',defectMn)
                del dfh[i]
    return dfh
# Build the structure, fill the unit cell, then collect bonded atom pairs.
atoms = dfh(single = True)
atoms_full = fillcell(atoms)
bondatoms = []
# NOTE(review): `snsite` is never used below - possibly a leftover from a
# defect-site variant of this script.
snsite = numpy.array([0.625,0.625,0.625])
# Any pair closer than 1.25x the sum of their atomic radii counts as a bond.
for sitei,sitej in itertools.combinations(atoms_full,2):
    radius = sitei.specie.atomic_radius + sitej.specie.atomic_radius
    bondlength = sitei.distance_from_point(sitej.coords)
    if bondlength <= 1.25 * radius:
        bondatoms.append((sitei,sitej))
tikz = tikz_atoms(atoms_full, bondatoms, drawcell = True) | 33.492754 | 100 | 0.603635 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.061445 |
63a6da93520ef33e9f9ab1ed832d591668deed60 | 3,458 | py | Python | instasave/__main__.py | hnrkcode/InstaSave | 81dd1638a20d8191566ad30772f652a6781d6c81 | [
"MIT"
] | 1 | 2020-07-05T19:42:44.000Z | 2020-07-05T19:42:44.000Z | instasave/__main__.py | hnrkcode/InstaSave | 81dd1638a20d8191566ad30772f652a6781d6c81 | [
"MIT"
] | null | null | null | instasave/__main__.py | hnrkcode/InstaSave | 81dd1638a20d8191566ad30772f652a6781d6c81 | [
"MIT"
] | 2 | 2019-09-10T20:21:35.000Z | 2019-09-19T09:43:03.000Z | import argparse
import os.path
from .instagram.post import Downloader
from .instagram.url import URLScraper
from .utils.webaddr import get_url, validate_url
from .web.client import HTTPHeaders
from .web.geckoloader import GeckoLoader
def get_arguments():
    """Parse and validate the program's command line arguments.

    Returns:
        argparse.Namespace: the parsed arguments.

    Raises:
        SystemExit: via parser.error() when --hashtag is set without a
            positive --post limit.
    """
    name = "instasave"
    usage = "%(prog)s [options] input"
    # Trailing spaces added: the original fragments concatenated into
    # "posts.Can scrape" / "from auser" in the displayed help text.
    descr = (
        "Download images, videos and metadata from public Instagram posts. "
        "Can scrape data from individual post's URL or multiple posts from a "
        "user or a hashtag page."
    )
    parser = argparse.ArgumentParser(prog=name, usage=usage, description=descr)
    parser.add_argument(
        "input",
        help=(
            "URL to post, users or hashtags."
            "A name is enough for users and hashtags."
        ),
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar=("PATH", "DIRNAME"),
        nargs=2,
        help="Set custom download location.",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Show information about the posts that are being downloaded.",
    )
    parser.add_argument(
        "-p",
        "--post",
        type=int,
        default=0,
        metavar="LIMIT",
        help="Limit number of posts to download from a user or a hashtag.",
    )
    parser.add_argument(
        "-H",
        "--hashtag",
        action="store_true",
        help="Download posts from a hashtag page.",
    )
    args = parser.parse_args()
    # A post limit is mandatory when scraping a hashtag feed.
    # parser.error() raises SystemExit itself, so no `raise` is needed.
    if args.hashtag and args.post < 1:
        parser.error(
            "-p LIMIT, --post LIMIT is required if -H, --hashtag is set"
        )
    # Return the already-parsed namespace instead of parsing argv a second time.
    return args
def set_downloader(headers, output, verbose):
    """Build the Downloader, honoring a custom output location if given."""
    if not output:
        return Downloader(headers, verbose=verbose)
    # A custom location was requested; it must already exist on disk.
    if not os.path.exists(output[0]):
        raise SystemExit("Path doesn't exist.")
    return Downloader(headers, output, verbose=verbose)
def main():
    """The program's main entry point: parse args, resolve the post URLs,
    then download each post's files (and metadata) to the output directory."""
    # Command line arguments from user.
    args = get_arguments()
    urls = [args.input]
    post_limit = args.post
    is_hashtag = args.hashtag
    is_verbose = args.verbose
    output_path = args.output
    # HTTP headers with a random user agent for requests.
    http_req = HTTPHeaders(is_verbose)
    headers = http_req.headers
    useragent = http_req.headers["User-Agent"]
    # Fetch the latest geckodriver for the system if it isn't already in path.
    GeckoLoader(headers, is_verbose)
    # Set custom download directory, otherwise use current working directory.
    file = set_downloader(headers, output_path, is_verbose)
    output_path = file.output
    # Scrape post URLs from a user or hashtag feed when a limit was given;
    # otherwise args.input is treated as a single post URL.
    if post_limit > 0:
        # Get full url to the username or hashtag.
        page_url = get_url(urls[0], is_hashtag)
        if page_url:
            webdriver = URLScraper(useragent, output_path)
            webdriver.open(page_url)
            urls = webdriver.scrape(post_limit, is_hashtag)
            webdriver.close()
    # Download files and save them to the output directory.
    for url in urls:
        post = validate_url(url)
        file.download(post)
        if not is_verbose:
            # Keep one blank line between downloads in quiet mode.
            print()
# Script entry point.
if __name__ == "__main__":
    main()
| 27.664 | 79 | 0.628687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,246 | 0.360324 |
63aa46352916f59092337f4bb1b5c13be2e2644b | 649 | py | Python | modules/dbnd-airflow/src/dbnd_airflow/web/airflow_app.py | dmytrostriletskyi/dbnd | d4a5f5167523e80439c9d64182cdc87b40cbc48f | [
"Apache-2.0"
] | 224 | 2020-01-02T10:46:37.000Z | 2022-03-02T13:54:08.000Z | modules/dbnd-airflow/src/dbnd_airflow/web/airflow_app.py | dmytrostriletskyi/dbnd | d4a5f5167523e80439c9d64182cdc87b40cbc48f | [
"Apache-2.0"
] | 16 | 2020-03-11T09:37:58.000Z | 2022-01-26T10:22:08.000Z | modules/dbnd-airflow/src/dbnd_airflow/web/airflow_app.py | dmytrostriletskyi/dbnd | d4a5f5167523e80439c9d64182cdc87b40cbc48f | [
"Apache-2.0"
] | 24 | 2020-03-24T13:53:50.000Z | 2022-03-22T11:55:18.000Z | import logging
def create_app(config=None, testing=False):
    """Create the Airflow RBAC web application and its appbuilder."""
    from airflow.www_rbac import app as airflow_app
    flask_app, appbuilder = airflow_app.create_app(config=config, testing=testing)
    # This view import must happen only after the app exists; importing it
    # earlier can create a circular dependency.
    from dbnd_airflow.airflow_override.dbnd_aiflow_webserver import (
        use_databand_airflow_dagbag,
    )
    use_databand_airflow_dagbag()
    logging.info("Airflow applications has been created")
    return flask_app, appbuilder
def cached_appbuilder(config=None, testing=False):
    """Create the application and return only its appbuilder."""
    return create_app(config, testing)[1]
| 28.217391 | 76 | 0.753467 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.200308 |
63ac6d028038f4c5c0f57741dd002fb5b4dacbf1 | 3,793 | py | Python | runway/lookups/handlers/random_string.py | onicagroup/runway | d50cac0e4878ff0691943029aa4f5b85d426a3b0 | [
"Apache-2.0"
] | 134 | 2018-02-26T21:35:23.000Z | 2022-03-03T00:30:27.000Z | runway/lookups/handlers/random_string.py | onicagroup/runway | d50cac0e4878ff0691943029aa4f5b85d426a3b0 | [
"Apache-2.0"
] | 937 | 2018-03-08T22:04:35.000Z | 2022-03-30T12:21:47.000Z | runway/lookups/handlers/random_string.py | onicagroup/runway | d50cac0e4878ff0691943029aa4f5b85d426a3b0 | [
"Apache-2.0"
] | 70 | 2018-02-26T23:48:11.000Z | 2022-03-02T18:44:30.000Z | """Generate a random string."""
# pyright: reportIncompatibleMethodOverride=none
from __future__ import annotations
import logging
import secrets
import string
from typing import TYPE_CHECKING, Any, Callable, List, Sequence, Union
from typing_extensions import Final, Literal
from ...utils import BaseModel
from .base import LookupHandler
if TYPE_CHECKING:
from ...context import CfnginContext, RunwayContext
LOGGER = logging.getLogger(__name__)
class ArgsDataModel(BaseModel):
    """Arguments data model.

    Each flag toggles one character class when assembling the random
    string's character set (see RandomStringLookup.calculate_char_set).
    """
    # include ASCII digits (0-9)
    digits: bool = True
    # include ASCII lowercase letters
    lowercase: bool = True
    # include ASCII punctuation (off by default)
    punctuation: bool = False
    # include ASCII uppercase letters
    uppercase: bool = True
class RandomStringLookup(LookupHandler):
    """Lookup handler that produces cryptographically random strings."""

    TYPE_NAME: Final[Literal["random.string"]] = "random.string"
    """Name that the Lookup is registered as."""

    @staticmethod
    def calculate_char_set(args: ArgsDataModel) -> str:
        """Assemble the candidate character set from the enabled flags."""
        pieces = [
            string.digits if args.digits else "",
            string.ascii_lowercase if args.lowercase else "",
            string.punctuation if args.punctuation else "",
            string.ascii_uppercase if args.uppercase else "",
        ]
        char_set = "".join(pieces)
        LOGGER.debug("character set: %s", char_set)
        return char_set

    @staticmethod
    def generate_random_string(char_set: Sequence[str], length: int) -> str:
        """Draw ``length`` characters from ``char_set`` using a CSPRNG."""
        chars = [secrets.choice(char_set) for _ in range(length)]
        return "".join(chars)

    @staticmethod
    def has_digit(value: str) -> bool:
        """Check if value contains a digit."""
        for ch in value:
            if ch.isdigit():
                return True
        return False

    @staticmethod
    def has_lowercase(value: str) -> bool:
        """Check if value contains lowercase."""
        for ch in value:
            if ch.islower():
                return True
        return False

    @staticmethod
    def has_punctuation(value: str) -> bool:
        """Check if value contains punctuation."""
        return any(ch in string.punctuation for ch in value)

    @staticmethod
    def has_uppercase(value: str) -> bool:
        """Check if value contains uppercase."""
        return any(ch.isupper() for ch in value)

    @classmethod
    def ensure_has_one_of(cls, args: ArgsDataModel, value: str) -> bool:
        """Ensure value has at least one of each required character.

        Args:
            args: Hook args.
            value: Value to check.

        """
        required = [
            check
            for enabled, check in (
                (args.digits, cls.has_digit),
                (args.lowercase, cls.has_lowercase),
                (args.punctuation, cls.has_punctuation),
                (args.uppercase, cls.has_uppercase),
            )
            if enabled
        ]
        # Vacuously true when no classes are enabled, matching the original
        # sum-of-checks comparison.
        return all(check(value) for check in required)

    @classmethod
    def handle(  # pylint: disable=arguments-differ
        cls,
        value: str,
        context: Union[CfnginContext, RunwayContext],
        *__args: Any,
        **__kwargs: Any,
    ) -> Any:
        """Generate a random string.

        Args:
            value: The value passed to the Lookup.
            context: The current context object.

        Raises:
            ValueError: Unable to find a value for the provided query and
                a default value was not provided.

        """
        length_str, lookup_args = cls.parse(value)
        desired_length = int(length_str)
        parsed_args = ArgsDataModel.parse_obj(lookup_args)
        pool = cls.calculate_char_set(parsed_args)
        # Rejection-sample until every enabled character class is present.
        while True:
            candidate = cls.generate_random_string(pool, desired_length)
            if cls.ensure_has_one_of(parsed_args, candidate):
                return cls.format_results(candidate, **lookup_args)
| 30.58871 | 80 | 0.631426 | 3,332 | 0.87846 | 0 | 0 | 2,925 | 0.771157 | 0 | 0 | 1,001 | 0.263907 |
63adc83e9b99bdc20e56c3462b56c6b2c4cdbcd3 | 4,523 | py | Python | FinBot/intent/Loki_Exchange.py | Lanlanluuu/LokiHub | aae3efb566d2383e78eaa8dc1e8b3f1bb097f2a6 | [
"MIT"
] | 17 | 2020-11-25T07:40:18.000Z | 2022-03-07T03:29:18.000Z | FinBot/intent/Loki_Exchange.py | Lanlanluuu/LokiHub | aae3efb566d2383e78eaa8dc1e8b3f1bb097f2a6 | [
"MIT"
] | 8 | 2020-12-18T13:23:59.000Z | 2021-10-03T21:41:50.000Z | FinBot/intent/Loki_Exchange.py | Lanlanluuu/LokiHub | aae3efb566d2383e78eaa8dc1e8b3f1bb097f2a6 | [
"MIT"
] | 43 | 2020-12-02T09:03:57.000Z | 2021-12-23T03:30:25.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Loki module for Exchange
Input:
inputSTR str,
utterance str,
args str[],
resultDICT dict
Output:
resultDICT dict
"""
DEBUG_Exchange = True
# Maps Chinese currency names (as they appear in user utterances) to
# ISO 4217 currency codes.  getResult() also scans these keys to extract a
# currency name embedded in a combined argument such as "100美金".
userDefinedDICT = {"歐元":"EUR",
                   "美金":"USD",
                   "日圓":"JPY",
                   "台幣":"TWD",
                   "臺幣":"TWD",
                   "英鎊":"GBP",
                   "法郎":"CHF",
                   "澳幣":"AUD",
                   "港幣":"HKD",
                   "泰銖":"THB"}
# Print the matched utterance for an input sentence. Debug/development aid.
def debugInfo(inputSTR, utterance):
    """Log which utterance pattern matched ``inputSTR`` when debugging is on."""
    if not DEBUG_Exchange:
        return
    print("[Exchange] {} ===> {}".format(inputSTR, utterance))
def getResult(inputSTR, utterance, args, resultDICT):
    """Fill ``resultDICT`` with exchange slots for a matched utterance.

    Replaces the previous copy-pasted if-chain with a table-driven dispatch
    and removes a leftover debug ``print("IN")``.

    Args:
        inputSTR: Raw user sentence (used only for debug logging).
        utterance: The utterance pattern that matched ``inputSTR``.
        args: Captured arguments, in pattern order.
        resultDICT: Result accumulator; on a known utterance the keys
            ``source``, ``target`` and ``amount`` are set.

    Returns:
        ``resultDICT`` — updated in place for known utterances, returned
        unchanged otherwise (same contract as before).
    """
    debugInfo(inputSTR, utterance)
    # Sentinel: the source currency must be parsed out of args[0]
    # (e.g. "100美金" -> "美金") using the names in userDefinedDICT.
    LOOKUP = object()
    # utterance -> (source, target, amount); each slot is an index into
    # ``args``, ``None`` for "not stated", or LOOKUP (see above).
    slotDICT = {
        "[100元][美金]可以兌換[台幣]多少": (1, 2, 0),
        "[100元][美金]可以兌換多少[台幣]": (1, 2, 0),
        "[100元][美金]要[台幣]多少": (1, 2, 0),
        "[100元][美金]要多少[台幣]": (1, 2, 0),
        "[100台幣]換[美金]": (LOOKUP, 1, 0),
        "[100美金]能換多少[台幣]": (LOOKUP, 1, 0),
        "[100美金]要[台幣]多少": (LOOKUP, 1, 0),
        "[100美金]要多少[台幣]": (LOOKUP, 1, 0),
        "[今天][美金]兌換[台幣]是多少": (1, 2, None),
        "[美金][100]要[台幣]多少": (0, 2, 1),
        "[美金][100]要多少[台幣]": (0, 2, 1),
        "[美金][100元]可以兌換[台幣]多少": (0, 2, 1),
        "[美金][100元]可以兌換多少[台幣]": (0, 2, 1),
        "[美金][100元]要[台幣]多少": (0, 2, 1),
        "[美金][100元]要多少[台幣]": (0, 2, 1),
        "我想要[100元][美金]": (1, None, 0),
        "我想要[美金][100元]": (0, None, 1),
        "我想買[100元][美金]": (1, None, 0),
        "我想買[美金][100元]": (0, None, 1),
        "[美金][100元]是多少[法郎]": (0, 2, 1),
    }
    if utterance not in slotDICT:
        # Unknown utterance: leave resultDICT untouched, as before.
        return resultDICT

    def resolve(slot):
        # Turn a slot spec into its concrete value.
        if slot is None:
            return None
        if slot is LOOKUP:
            # First currency name found inside args[0] (insertion order).
            return [x for x in userDefinedDICT if x in args[0]][0]
        return args[slot]

    source, target, amount = slotDICT[utterance]
    resultDICT["source"] = resolve(source)
    resultDICT["target"] = resolve(target)
    resultDICT["amount"] = resolve(amount)
    return resultDICT
63ae1f61f819018618faf981a6a8c7128684233e | 1,396 | py | Python | venv/Lib/site-packages/win32comext/directsound/test/ds_record.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 150 | 2021-11-02T05:31:51.000Z | 2022-03-24T06:22:22.000Z | venv/Lib/site-packages/win32comext/directsound/test/ds_record.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 4 | 2021-12-01T11:55:58.000Z | 2022-02-24T16:14:37.000Z | venv/Lib/site-packages/win32comext/directsound/test/ds_record.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 33 | 2021-11-03T00:29:41.000Z | 2022-03-15T13:15:56.000Z | import pywintypes
import struct
import win32event, win32api
import os
import win32com.directsound.directsound as ds
def wav_header_pack(wfx, datasize):
    """Build the canonical 44-byte RIFF/WAVE header for a PCM data chunk.

    Fix: ``struct.pack`` with ``s`` format codes requires ``bytes`` in
    Python 3 — the previous ``str`` literals ("RIFF", ...) raised TypeError.

    Args:
        wfx: A WAVEFORMATEX-like object exposing wFormatTag, nChannels,
            nSamplesPerSec, nAvgBytesPerSec, nBlockAlign, wBitsPerSample.
        datasize: Size of the PCM payload in bytes (the "data" chunk).

    Returns:
        bytes: the 44-byte header to prepend to the raw PCM samples.
    """
    return struct.pack(
        "<4sl4s4slhhllhh4sl",
        b"RIFF",
        36 + datasize,  # RIFF chunk size = header remainder (36) + data
        b"WAVE",
        b"fmt ",
        16,  # size of the PCM fmt sub-chunk
        wfx.wFormatTag,
        wfx.nChannels,
        wfx.nSamplesPerSec,
        wfx.nAvgBytesPerSec,
        wfx.nBlockAlign,
        wfx.wBitsPerSample,
        b"data",
        datasize,
    )
# Create a DirectSound capture object (no explicit device or GUID).
d = ds.DirectSoundCaptureCreate(None, None)
# Describe the capture buffer: 44.1 kHz stereo 16-bit PCM;
# 352800 bytes = 176400 bytes/sec * 2.
sdesc = ds.DSCBUFFERDESC()
sdesc.dwBufferBytes = 352800 # 2 seconds
sdesc.lpwfxFormat = pywintypes.WAVEFORMATEX()
sdesc.lpwfxFormat.wFormatTag = pywintypes.WAVE_FORMAT_PCM
sdesc.lpwfxFormat.nChannels = 2
sdesc.lpwfxFormat.nSamplesPerSec = 44100
sdesc.lpwfxFormat.nAvgBytesPerSec = 176400
sdesc.lpwfxFormat.nBlockAlign = 4
sdesc.lpwfxFormat.wBitsPerSample = 16
print(sdesc)
print(d)
buffer = d.CreateCaptureBuffer(sdesc)
# Request that `event` be signalled at the DSBPN_OFFSETSTOP position
# (presumably when capture reaches the end of the buffer — TODO confirm).
event = win32event.CreateEvent(None, 0, 0, None)
notify = buffer.QueryInterface(ds.IID_IDirectSoundNotify)
notify.SetNotificationPositions((ds.DSBPN_OFFSETSTOP, event))
# Start capturing and block until the notification fires.
buffer.Start(0)
win32event.WaitForSingleObject(event, -1)
data = buffer.Update(0, 352800)
# Write the captured PCM data as a WAV file into the temp directory.
fname = os.path.join(win32api.GetTempPath(), "test_directsound_record.wav")
f = open(fname, "wb")
f.write(wav_header_pack(sdesc.lpwfxFormat, 352800))
f.write(data)
f.close()
| 23.661017 | 75 | 0.722779 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.063037 |
63af8fc8ef5f415eb14f6ce6760e2d9eadfd7966 | 5,322 | py | Python | _unittests/ut_df/test_pandas_groupbynan.py | Pandinosaurus/pandas_streaming | 03008b63545e3634290ef0c041e920d94d454ccf | [
"MIT"
] | 20 | 2017-09-23T03:23:13.000Z | 2022-02-21T09:10:48.000Z | _unittests/ut_df/test_pandas_groupbynan.py | Pandinosaurus/pandas_streaming | 03008b63545e3634290ef0c041e920d94d454ccf | [
"MIT"
] | 19 | 2017-09-24T17:10:39.000Z | 2021-12-29T11:02:58.000Z | _unittests/ut_df/test_pandas_groupbynan.py | Pandinosaurus/pandas_streaming | 03008b63545e3634290ef0c041e920d94d454ccf | [
"MIT"
] | 7 | 2018-11-09T08:15:20.000Z | 2021-09-17T07:39:44.000Z | # coding: utf-8
"""
@brief test log(time=1s)
"""
import unittest
import pandas
import numpy
from scipy.sparse.linalg import lsqr as sparse_lsqr
from pyquickhelper.pycode import ExtTestCase, ignore_warnings
from pandas_streaming.df import pandas_groupby_nan, numpy_types
class TestPandasHelper(ExtTestCase):
    """Unit tests for :func:`pandas_streaming.df.pandas_groupby_nan`."""

    def test_pandas_groupbynan(self):
        # Sanity check that scipy imported correctly in this environment.
        self.assertTrue(sparse_lsqr is not None)
        # One (type, sample value) pair per dtype we want to exercise,
        # extended with every numpy scalar type except the unsupported ones.
        types = [(int, -10), (float, -20.2), (str, "e"),
                 (bytes, bytes("a", "ascii"))]
        skip = (numpy.bool_, numpy.complex64, numpy.complex128)
        types += [(_, _(5)) for _ in numpy_types() if _ not in skip]
        # Grouping on a single column: the NaN group must survive as a
        # float NaN in the aggregated output.
        for ty in types:
            data = [{"this": "cst", "type": "tt1=" + str(ty[0]), "value": ty[1]},
                    {"this": "cst", "type": "tt2=" +
                     str(ty[0]), "value": ty[1]},
                    {"this": "cst", "type": "row_for_nan"}]
            df = pandas.DataFrame(data)
            gr = pandas_groupby_nan(df, "value")
            co = gr.sum()
            li = list(co["value"])
            # Re-raise with the offending dtype to ease debugging.
            try:
                self.assertIsInstance(li[-1], float)
            except AssertionError as e:
                raise AssertionError("Issue with {0}".format(ty)) from e
            try:
                self.assertTrue(numpy.isnan(li[-1]))
            except AssertionError as e:
                raise AssertionError(
                    "Issue with value {}\n--df--\n{}\n--gr--\n{}\n--co--\n{}".format(
                        li, df, gr.count(), co)) from e
        # Grouping on multiple columns: a tuple key must raise TypeError,
        # a list key may raise TypeError/NotImplementedError or succeed.
        for ty in types:
            data = [{"this": "cst", "type": "tt1=" + str(ty[0]), "value": ty[1]},
                    {"this": "cst", "type": "tt2=" +
                     str(ty[0]), "value": ty[1]},
                    {"this": "cst", "type": "row_for_nan"}]
            df = pandas.DataFrame(data)
            try:
                gr = pandas_groupby_nan(df, ("value", "this"))
                t = True
                # If the call unexpectedly succeeds, this raise escapes the
                # except (which only catches TypeError) and fails the test.
                raise Exception("---")
            except TypeError:
                t = False
            # NOTE(review): `t` can only be False here (see the raise above),
            # so this branch is effectively unreachable.
            if t:
                co = gr.sum()
                li = list(co["value"])
                self.assertIsInstance(li[-1], float)
                self.assertTrue(numpy.isnan(li[-1]))
            try:
                gr = pandas_groupby_nan(df, ["value", "this"])
                t = True
            except (TypeError, NotImplementedError):
                t = False
            if t:
                co = gr.sum()
                li = list(co["value"])
                self.assertEqual(len(li), 2)

    def test_pandas_groupbynan_tuple(self):
        # Plain groupby drops rows with NaN keys (only one complete row);
        # pandas_groupby_nan must keep them, labelled with the suffix.
        data = [dict(a="a", b="b", c="c", n=1), dict(
            b="b", n=2), dict(a="a", n=3), dict(c="c", n=4)]
        df = pandas.DataFrame(data)
        gr = df.groupby(["a", "b", "c"]).sum()
        self.assertEqual(gr.shape, (1, 1))
        for nanback in [True, False]:
            try:
                gr2_ = pandas_groupby_nan(
                    df, ["a", "b", "c"], nanback=nanback, suffix="NAN")
            except NotImplementedError:
                # nanback support may be missing for multi-key grouping.
                continue
            gr2 = gr2_.sum().sort_values("n")
            self.assertEqual(gr2.shape, (4, 4))
            d = gr2.to_dict("records")
            self.assertEqual(d[0]["a"], "a")
            self.assertEqual(d[0]["b"], "b")
            self.assertEqual(d[0]["c"], "c")
            self.assertEqual(d[0]["n"], 1)
            self.assertEqual(d[1]["a"], "NAN")

    def test_pandas_groupbynan_regular(self):
        # Without NaN keys the result must match a plain pandas groupby.
        df = pandas.DataFrame([dict(a="a", b=1), dict(a="a", b=2)])
        gr = df.groupby(["a"]).sum()
        gr2_ = pandas_groupby_nan(df, ["a"]).sum()
        self.assertEqualDataFrame(gr, gr2_)

    def test_pandas_groupbynan_regular_nanback(self):
        # nanback=True on a multi-column key is expected to be unsupported.
        df = pandas.DataFrame([dict(a="a", b=1, cc=0), dict(a="a", b=2)])
        gr = df.groupby(["a", "cc"]).sum()
        self.assertEqual(len(gr), 1)
        self.assertRaise(
            lambda: pandas_groupby_nan(df, ["a", "cc"], nanback=True).sum(),
            NotImplementedError)

    def test_pandas_groupbynan_doc(self):
        # The NaN group is preserved and aggregated (value 30).
        data = [dict(a=2, ind="a", n=1),
                dict(a=2, ind="a"),
                dict(a=3, ind="b"),
                dict(a=30)]
        df = pandas.DataFrame(data)
        gr2 = pandas_groupby_nan(df, ["ind"]).sum()
        ind = list(gr2['ind'])
        self.assertTrue(numpy.isnan(ind[-1]))
        val = list(gr2['a'])
        self.assertEqual(val[-1], 30)

    @ignore_warnings(UserWarning)
    def test_pandas_groupbynan_doc2(self):
        # With nanback=False the NaN key is replaced by its string marker.
        data = [dict(a=2, ind="a", n=1),
                dict(a=2, ind="a"),
                dict(a=3, ind="b"),
                dict(a=30)]
        df = pandas.DataFrame(data)
        gr2 = pandas_groupby_nan(df, ["ind", "a"], nanback=False).sum()
        ind = list(gr2['ind'])
        self.assertEqual(ind[-1], "²nan")

    def test_pandas_groupbynan_doc3(self):
        # Multi-key grouping that needs nanback falls back to
        # NotImplementedError.
        data = [dict(a=2, ind="a", n=1),
                dict(a=2, ind="a"),
                dict(a=3, ind="b"),
                dict(a=30)]
        df = pandas.DataFrame(data)
        self.assertRaise(lambda: pandas_groupby_nan(df, ["ind", "n"]).sum(),
                         NotImplementedError)
        # ind = list(gr2['ind'])
        # self.assertTrue(numpy.isnan(ind[-1]))
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 37.216783 | 85 | 0.485908 | 4,996 | 0.938568 | 0 | 0 | 395 | 0.074206 | 0 | 0 | 632 | 0.11873 |
63afd781a3f83b4472e5cc0102407a1faccfc8c4 | 6,769 | py | Python | ros/src/tl_detector/tl_detector.py | irt24/Udacity-CarND-FinalProject | 0516a39a544f5a90c3b12b44f7ae200d53d8c375 | [
"MIT"
] | null | null | null | ros/src/tl_detector/tl_detector.py | irt24/Udacity-CarND-FinalProject | 0516a39a544f5a90c3b12b44f7ae200d53d8c375 | [
"MIT"
] | null | null | null | ros/src/tl_detector/tl_detector.py | irt24/Udacity-CarND-FinalProject | 0516a39a544f5a90c3b12b44f7ae200d53d8c375 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
from scipy.spatial import KDTree
import tf
import cv2
import yaml
# Number of consecutive identical classifications required before the
# detector trusts (and publishes) a new traffic-light state (see image_cb).
STATE_COUNT_THRESHOLD = 3
class TLDetector(object):
    """ROS node that detects upcoming traffic lights and publishes the
    index of the waypoint closest to the stop line of a RED light on
    /traffic_waypoint (-1 when there is nothing to stop for).
    """

    def __init__(self):
        rospy.init_node('tl_detector')

        # Latest data received from the subscribed topics.
        self.pose = None
        self.waypoints = None
        self.waypoint_tree = None  # KDTree over waypoint (x, y); built once
        self.camera_image = None
        self.lights = []

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        # Stop-line positions etc. come from the ROS parameter server.
        config_string = rospy.get_param("/traffic_light_config")
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted input; acceptable here only because the
        # parameter server is trusted.
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        # Debouncing state for the classifier output (see image_cb).
        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        # Blocks until node shutdown; callbacks do all the work.
        rospy.spin()

    def pose_cb(self, msg):
        # Cache the car's current pose.
        self.pose = msg

    def waypoints_cb(self, waypoints):
        # Cache the track waypoints; build the KD-tree on first receipt only.
        self.waypoints = waypoints
        if not self.waypoint_tree:
            self.waypoint_tree = KDTree([
                [w.pose.pose.position.x, w.pose.pose.position.y]
                for w in waypoints.waypoints])

    def traffic_cb(self, msg):
        # Cache the (simulator-provided) traffic light array.
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
        of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()

        # Publish upcoming red lights at camera frequency.
        # Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        # of times till we start using it. Otherwise the previous stable state is
        # used.
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            # Only a RED light produces a stop waypoint; everything else -> -1.
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            # Not yet stable: keep publishing the last confirmed waypoint.
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        if (state == TrafficLight.RED):
            rospy.logwarn("Detected RED light! Count: %i" % self.state_count)
        if (state == TrafficLight.GREEN):
            rospy.logwarn("Detected GREEN light! Count: %i" % self.state_count)
        self.state_count += 1

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
        https://en.wikipedia.org/wiki/Closest_pair_of_points_problem

        Args:
            x (float): x coordinate of the position to match a waypoint to
            y (float): y coordinate of the position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        return self.waypoint_tree.query([x, y], 1)[1]

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        # For testing, just return the light state.
        # NOTE(review): the camera-based classifier below is disabled; the
        # ground-truth state from the simulator is returned instead.
        return light.state
        #if (not self.has_image):
        #    self.prev_light_loc = None
        #    return False

        #cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        #return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
        location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        closest_light = None
        closest_stop_line_idx = -1
        stop_line_positions = self.config['stop_line_positions']
        if (self.pose):
            car_waypoint_idx = self.get_closest_waypoint(self.pose.pose.position.x,
                                                         self.pose.pose.position.y)

            # Find the closest visible traffic light.
            # Since the list of lights is short (~8), there isn't much benefit in using KD trees.
            min_distance_to_stop_line = len(self.waypoints.waypoints)
            for light, stop_line in zip(self.lights, stop_line_positions):
                stop_line_idx = self.get_closest_waypoint(stop_line[0], stop_line[1])
                distance_to_stop_line = stop_line_idx - car_waypoint_idx
                # -10 is key to getting the simualtor to work (at least on my not-so-powerful machine).
                # The simulator is laggy and reports the position further ahead that it displays it.
                # Often, when the car is in front of a red traffic light, it doesn't stop because it
                # thinks it's past it.
                if -10 <= distance_to_stop_line <= min_distance_to_stop_line:
                    min_distance_to_stop_line = distance_to_stop_line
                    closest_light = light
                    closest_stop_line_idx = stop_line_idx

        light_state = self.get_light_state(closest_light) if closest_light else TrafficLight.UNKNOWN
        return closest_stop_line_idx, light_state
# Entry point: construct the detector node (its __init__ spins until
# shutdown).
if __name__ == '__main__':
    try:
        TLDetector()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start traffic node.')
| 36.989071 | 108 | 0.646033 | 6,193 | 0.914906 | 0 | 0 | 0 | 0 | 0 | 0 | 2,678 | 0.395627 |
63aff24134f0357f39793f17fa4ba1b4b87cbb8d | 1,601 | py | Python | initGame.py | Sabsterrexx/PingPongGame | 61e22838e3f48fcb7ffb4d83cf7c5d793766110b | [
"MIT"
] | null | null | null | initGame.py | Sabsterrexx/PingPongGame | 61e22838e3f48fcb7ffb4d83cf7c5d793766110b | [
"MIT"
] | null | null | null | initGame.py | Sabsterrexx/PingPongGame | 61e22838e3f48fcb7ffb4d83cf7c5d793766110b | [
"MIT"
] | null | null | null | #Import required Modules:
import pygame
import constants
from paddle import Paddle
from ball import Ball
from score import Score
from text import Text
from screenState import ScreenState
# Wires together everything described by the "screenState" module's class.
def init():
    """Build every object the game needs and bundle them into a ScreenState.

    Returns:
        ScreenState: fully initialised game state (screen, texts, paddles,
        ball, scores, clock and target FPS).
    """
    # Constants first: this also creates the display surface.
    display = constants.initialize()
    # Frame-rate clock.
    frame_clock = pygame.time.Clock()
    frames_per_second = 60
    # Score displays for the two players; player 2's score is drawn in red.
    score_one = Score(display)
    score_one.x = 100
    score_two = Score(display)
    score_two.color = constants.colors["RED"]
    # Paddles: blue on the left (x=10), red on the right (x=780).
    left_paddle = Paddle()
    left_paddle.x = 10
    left_paddle.color = constants.colors["BLUE"]
    right_paddle = Paddle()
    right_paddle.x = 780
    right_paddle.color = constants.colors["RED"]
    # Ball starts at the centre, moving at its base speed, coloured purple.
    game_ball = Ball()
    game_ball.dx = game_ball.speed
    game_ball.dy = game_ball.speed
    game_ball.x = constants.cx
    game_ball.y = constants.cy
    game_ball.color = constants.colors["PURPLE"]
    # On-screen text for the title and game-over screens.
    title_banner = Text(display)
    title_banner.text = "Welcome to Saabit Pong Game. Difficulty keys: Easy: 1, Medium: 2, Hard: 3"
    game_over_banner = Text(display)
    game_over_banner.text = "Game Over. Press 'P' to play again"
    return ScreenState(display, title_banner, game_over_banner, left_paddle,
                       right_paddle, game_ball, score_one, score_two,
                       frame_clock, frames_per_second)
| 26.245902 | 123 | 0.658339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 536 | 0.334791 |
63b1672cda5a034b03a601fa6667760f80b9ca78 | 1,965 | py | Python | minmax.py | patello/subtracting_squares | 53ecfbfe96e2ddb9cb82307bae9c779a4d7d7b4d | [
"MIT"
] | null | null | null | minmax.py | patello/subtracting_squares | 53ecfbfe96e2ddb9cb82307bae9c779a4d7d7b4d | [
"MIT"
] | null | null | null | minmax.py | patello/subtracting_squares | 53ecfbfe96e2ddb9cb82307bae9c779a4d7d7b4d | [
"MIT"
] | null | null | null | class Game:
def __init__(self,span=[0,100],sub_fun="Square"):
self.span=span
self.func_dict = {
"Square":lambda x: pow(x,2),
"Odd":lambda x: 1 + (x-1)*2,
"Even":lambda x: x*2,
"Identity":lambda x: x,
"Logarithm":lambda x: pow(10,x-1)
}
self.sub_fun = self.func_dict[sub_fun]
def calculate(self):
#winning_pos is an array. It determines if landing in the current position will give you a win.
#True = Win, False = Loss, None = Not yet determined. As per the rules of the game, 0 is initialized as False.
#This is since if you start on 0, that means your opponent won the previous round.
#All others are None at the start of the calculation. The lenght is equal to the start and end point, plus 1.
winning_pos = [False]+[None]*(self.span[1]-self.span[0])
#Iterate until there no longer are any None-s.
while winning_pos.count(None)>0:
#Start with the highest False. Add numbers drawn from sub_fun to it and set those positions to True.
#If you land on those positions, you can win, because you can then put your opponent in a loosing position.
hi_false=[i for i,x in enumerate(winning_pos) if x==False][-1]
i = 1
while hi_false + self.sub_fun(i) <= self.span[1]:
winning_pos[hi_false + self.sub_fun(i)] = True
i+=1
#Then, set the lowest None to False. From that position, you will always put your opponent in a winning position
#There might be no None positions left, in that case, a value error is raised.
try:
lo_none=winning_pos.index(None)
except ValueError:
break
winning_pos[lo_none]=False
return winning_pos
# When run as a script: solve the default game (subtract-a-square on
# 0..100) and print whether the last position is winning for the mover.
if __name__ == "__main__":
    game=Game()
    result=game.calculate()
    print(str(result[-1]))
63b20249148e887bf399bbe4138aaff1329b132a | 983 | py | Python | fwl-automation-decisions/domain/src/domain/model/firewall/Firewall.py | aherculano/fwl-project | 6d4c4d40393b76d45cf13b572b5aabc0696e9285 | [
"MIT"
] | null | null | null | fwl-automation-decisions/domain/src/domain/model/firewall/Firewall.py | aherculano/fwl-project | 6d4c4d40393b76d45cf13b572b5aabc0696e9285 | [
"MIT"
] | null | null | null | fwl-automation-decisions/domain/src/domain/model/firewall/Firewall.py | aherculano/fwl-project | 6d4c4d40393b76d45cf13b572b5aabc0696e9285 | [
"MIT"
] | null | null | null | from .FirewallName import FirewallName
from .FirewallUUID import FirewallUUID
from .FirewallAccessLayer import FirewallAccessLayer
class Firewall(object):
    """Domain entity for a firewall, identified by its UUID.

    Fixes: defining ``__eq__`` without ``__hash__`` made instances
    unhashable in Python 3; equality now uses ``==`` instead of calling
    ``__eq__`` directly (which bypassed the reflected-comparison protocol).
    """

    def __init__(self, uuid: FirewallUUID, name: FirewallName, access_layer: FirewallAccessLayer):
        self.uuid = uuid
        self.name = name
        self.access_layer = access_layer

    @property
    def uuid(self):
        """Unique identifier of the firewall."""
        return self._uuid

    @property
    def name(self):
        """Human-readable firewall name."""
        return self._name

    @property
    def access_layer(self):
        """Access layer this firewall belongs to."""
        return self._access_layer

    @uuid.setter
    def uuid(self, value: FirewallUUID):
        self._uuid = value

    @name.setter
    def name(self, value: FirewallName):
        self._name = value

    @access_layer.setter
    def access_layer(self, value: FirewallAccessLayer):
        self._access_layer = value

    def __eq__(self, other: object):
        # Two firewalls are the same entity iff their UUIDs compare equal;
        # name and access layer are mutable attributes, not identity.
        if isinstance(other, Firewall):
            return self.uuid == other.uuid
        return False

    def __hash__(self):
        # Hash on the same identity used by __eq__ so instances can live in
        # sets/dict keys.  Assumes FirewallUUID is hashable — TODO confirm.
        return hash(self._uuid)

    def __repr__(self):
        return "Firewall(uuid={!r}, name={!r})".format(self._uuid, self._name)
| 23.97561 | 98 | 0.670397 | 849 | 0.863683 | 0 | 0 | 452 | 0.459817 | 0 | 0 | 0 | 0 |
63b23605e301a71e7c5288370291d963f9333dd6 | 3,479 | py | Python | other_scripts/check_status.py | seyros/python_training | 15a5a3fa471d8ff63ccdd03c13bd09997a8b5794 | [
"Apache-2.0"
] | null | null | null | other_scripts/check_status.py | seyros/python_training | 15a5a3fa471d8ff63ccdd03c13bd09997a8b5794 | [
"Apache-2.0"
] | null | null | null | other_scripts/check_status.py | seyros/python_training | 15a5a3fa471d8ff63ccdd03c13bd09997a8b5794 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'ivanov'

import pymysql

# Connect to the database.
# NOTE(review): credentials are hard-coded here; move them to a config file
# or environment variables.
connection = pymysql.connect(host="localhost", user="root", passwd="1112223334", db="testdb", charset='utf8', cursorclass=pymysql.cursors.DictCursor)
try:
    with connection.cursor() as cursor:
        # Insert a new CHECK_STATUS row summarising today's CHECK_OBJECT
        # load: duplicate/NULL/zero counts and averages, all computed
        # server-side via sub-selects filtered on LOAD_DATE = CURDATE().
        sql = "INSERT INTO CHECK_STATUS " \
              "(ID, LOAD_DATE, NONUNIQ_COUNT, ROW_COUNT, IDNULL_COUNT, IVNULL_COUNT, FVNULL_COUNT, CVNULL_COUNT, DVNULL_COUNT, IDZERO_COUNT, IVZERO_COUNT, FVZERO_COUNT, AV_INT_VALUE, AV_FLOAT_VALUE)" \
              " VALUES (" \
              "NULL," \
              "CURDATE()," \
              "(select count(*) from CHECK_OBJECT WHERE CONCAT(ID,INT_VALUE) IN " \
              "(select * from (SELECT CONCAT(ID,INT_VALUE) AS CC FROM CHECK_OBJECT GROUP BY CC HAVING COUNT(*) > 1) subquary" \
              " WHERE CC is not null) AND LOAD_DATE = CURDATE())," \
              "(select count(*) from CHECK_OBJECT where LOAD_DATE = CURDATE())," \
              "(select count(*) from CHECK_OBJECT where ID is NULL AND LOAD_DATE = CURDATE())," \
              "(select count(*) from CHECK_OBJECT where INT_VALUE is NULL AND LOAD_DATE = CURDATE())," \
              "(select count(*) from CHECK_OBJECT where FLOAT_VALUE is NULL AND LOAD_DATE = CURDATE())," \
              "(select count(*) from CHECK_OBJECT where CHAR_VALUE is NULL AND LOAD_DATE = CURDATE())," \
              "(select count(*) from CHECK_OBJECT where DATE_VALUE is NULL AND LOAD_DATE = CURDATE())," \
              "(select count(*) from CHECK_OBJECT where ID = 0 AND LOAD_DATE = CURDATE())," \
              "(select count(*) from CHECK_OBJECT where INT_VALUE = 0 AND LOAD_DATE = CURDATE())," \
              "(select count(*) from CHECK_OBJECT where FLOAT_VALUE = 0 AND LOAD_DATE = CURDATE())," \
              "(select AVG(INT_VALUE) from CHECK_OBJECT where LOAD_DATE = CURDATE())," \
              "(select AVG(FLOAT_VALUE) from CHECK_OBJECT where LOAD_DATE = CURDATE()))"
        cursor.execute(sql)
        connection.commit()
    # Close the DB connection.
finally:
    connection.close()
63b3ca6cdf03796cdc1925b2b477724000dd42e8 | 3,461 | py | Python | inria/helloworld/datamodules.py | hernanlira/inria-anomaly_detection | f1dd980f7245c388f2ab583083ee72fb64d60a17 | [
"MIT"
] | null | null | null | inria/helloworld/datamodules.py | hernanlira/inria-anomaly_detection | f1dd980f7245c388f2ab583083ee72fb64d60a17 | [
"MIT"
] | null | null | null | inria/helloworld/datamodules.py | hernanlira/inria-anomaly_detection | f1dd980f7245c388f2ab583083ee72fb64d60a17 | [
"MIT"
] | null | null | null | from torchvision.datasets import MNIST
from torchvision import transforms
import pytorch_lightning as pl
from torch.utils.data import DataLoader, random_split
from multiprocessing import cpu_count
from typing import Any, Optional, Union
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
from torchvision.transforms import Compose, RandomCrop, RandomHorizontalFlip, ToTensor
class MnistDataModule(pl.LightningDataModule):
    """LightningDataModule wrapping torchvision's MNIST dataset."""

    def __init__(self, data_dir="./data", batch_size=128, num_workers=None):
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        # Fall back to one worker per CPU core when the caller passes
        # None (or 0).
        self.num_workers = num_workers if num_workers else cpu_count()
        normalize = transforms.Normalize((0.1307,), (0.3081,))
        self.transform = transforms.Compose([transforms.ToTensor(), normalize])

    def prepare_data(self):
        """Download both splits; deliberately assigns no state."""
        for train_split in (True, False):
            MNIST(self.data_dir, train=train_split, download=True)

    def setup(self, stage=None):
        """Instantiate only the datasets relevant to ``stage``."""
        if stage in ("fit", None):
            full_train = MNIST(self.data_dir, train=True, transform=self.transform)
            self.mnist_train, self.mnist_val = random_split(full_train, [55000, 5000])
        if stage in ("test", None):
            self.mnist_test = MNIST(self.data_dir, train=False, transform=self.transform)

    # One DataLoader per split; evaluation loaders use a 10x larger batch.
    def train_dataloader(self):
        return DataLoader(self.mnist_train, batch_size=self.batch_size,
                          num_workers=self.num_workers)

    def val_dataloader(self):
        return DataLoader(self.mnist_val, batch_size=10 * self.batch_size,
                          num_workers=self.num_workers)

    def test_dataloader(self):
        return DataLoader(self.mnist_test, batch_size=10 * self.batch_size,
                          num_workers=self.num_workers)
class Cifar10DataModule(CIFAR10DataModule):
    """CIFAR10DataModule that installs default transforms when none given.

    The train split gets random-crop + horizontal-flip augmentation; the
    val/test splits get only tensor conversion; every split is normalized
    with the CIFAR-10 statistics.  Caller-supplied transforms win.
    """

    def __init__(
        self,
        data_dir: Optional[str] = None,
        val_split: Union[int, float] = 0.2,
        num_workers: int = cpu_count(),
        normalize: bool = False,
        batch_size: int = 32,
        seed: int = 42,
        shuffle: bool = True,
        pin_memory: bool = True,
        drop_last: bool = False,
        *args: Any,
        **kwargs: Any
    ) -> None:
        # kwargs.get(...) is None covers both "key missing" and
        # "explicitly passed as None".
        if kwargs.get("train_transforms") is None:
            kwargs["train_transforms"] = Compose(
                [
                    RandomCrop(32, padding=4),
                    RandomHorizontalFlip(),
                    ToTensor(),
                    cifar10_normalization(),
                ]
            )
        eval_transforms = Compose(
            [
                ToTensor(),
                cifar10_normalization(),
            ]
        )
        if kwargs.get("test_transforms") is None:
            kwargs["test_transforms"] = eval_transforms
        if kwargs.get("val_transforms") is None:
            kwargs["val_transforms"] = eval_transforms
        super().__init__(
            data_dir, val_split, num_workers, normalize, batch_size, seed,
            shuffle, pin_memory, drop_last, *args, **kwargs
        )
| 36.431579 | 122 | 0.647501 | 3,001 | 0.86709 | 0 | 0 | 0 | 0 | 0 | 0 | 322 | 0.093037 |
63b5c11e95ca01d34e674cc2e9e39d8f57bab66d | 9,678 | py | Python | ml3d/tf/modules/pointnet.py | eiiijiiiy/Open3D-ML | 2a04231df0be39e2b8030e480d342cee5574fb9a | [
"MIT"
] | null | null | null | ml3d/tf/modules/pointnet.py | eiiijiiiy/Open3D-ML | 2a04231df0be39e2b8030e480d342cee5574fb9a | [
"MIT"
] | null | null | null | ml3d/tf/modules/pointnet.py | eiiijiiiy/Open3D-ML | 2a04231df0be39e2b8030e480d342cee5574fb9a | [
"MIT"
] | null | null | null | import tensorflow as tf
from typing import List
from ..utils.tf_utils import gen_CNN
from ...utils import MODEL
from ..utils.pointnet import pointnet2_utils
class Pointnet2MSG(tf.keras.layers.Layer):
    """PointNet++ encoder/decoder with multi-scale grouping.

    Builds a stack of set-abstraction (SA) stages from ``SA_config`` and a
    stack of feature-propagation (FP) stages from ``fp_mlps``.

    NOTE(review): with the default ``SA_config``, each ``radius``/``nsample``
    entry is a scalar while PointnetSAModuleMSG expects per-scale lists —
    confirm real configs pass lists here.
    """

    def __init__(self,
                 in_channels=6,
                 use_xyz=True,
                 SA_config={
                     "npoints": [128, 32, -1],
                     "radius": [0.2, 0.4, 100],
                     "nsample": [64, 64, 64],
                     "mlps": [[128, 128, 128], [128, 128, 256], [256, 256, 512]]
                 },
                 fp_mlps=[]):
        super().__init__()

        # --- encoder: set-abstraction stages ---
        self.SA_modules = []
        skip_channel_list = [in_channels]
        channels = in_channels
        for stage in range(len(SA_config["npoints"])):
            # Prefix every per-scale MLP spec with the current channel count
            # and accumulate the concatenated output width of this stage.
            stage_mlps = SA_config["mlps"][stage].copy()
            stage_out = 0
            for scale in range(len(stage_mlps)):
                stage_mlps[scale] = [channels] + stage_mlps[scale]
                stage_out += stage_mlps[scale][-1]
            self.SA_modules.append(
                PointnetSAModuleMSG(npoint=SA_config["npoints"][stage],
                                    radii=SA_config["radius"][stage],
                                    nsamples=SA_config["nsample"][stage],
                                    mlps=stage_mlps,
                                    use_xyz=use_xyz,
                                    batch_norm=True))
            channels = stage_out
            skip_channel_list.append(stage_out)

        # --- decoder: feature-propagation stages ---
        self.FP_modules = []
        for stage in range(len(fp_mlps)):
            pre_channel = (fp_mlps[stage + 1][-1]
                           if stage + 1 < len(fp_mlps) else channels)
            self.FP_modules.append(
                PointnetFPModule(
                    mlp=[pre_channel + skip_channel_list[stage]] + fp_mlps[stage],
                    batch_norm=True))

    def _break_up_pc(self, pc):
        # Split a (B, N, 3 + C) cloud into coordinates and optional features.
        xyz = pc[..., 0:3]
        features = pc[..., 3:] if pc.shape[-1] > 3 else None
        return xyz, features

    def call(self, pointcloud, training=True):
        """Encode then decode the cloud; returns (xyz, per-point features)."""
        xyz, features = self._break_up_pc(pointcloud)

        # Encoder pass: each SA module consumes the previous stage's output.
        l_xyz, l_features = [xyz], [features]
        for sa_module in self.SA_modules:
            li_xyz, li_features = sa_module(l_xyz[-1], l_features[-1],
                                            training=training)
            l_xyz.append(li_xyz)
            l_features.append(li_features)

        # Decoder pass: propagate features back to the original resolution.
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](l_xyz[i - 1],
                                                   l_xyz[i],
                                                   l_features[i - 1],
                                                   l_features[i],
                                                   training=training)

        return l_xyz[0], l_features[0]
MODEL._register_module(Pointnet2MSG, 'tf')
class _PointnetSAModuleBase(tf.keras.layers.Layer):
    """Shared forward pass for PointNet++ set-abstraction layers.

    Subclasses populate ``npoint``, ``groupers`` and ``mlps``.
    """

    def __init__(self):
        super().__init__()
        self.npoint = None
        self.groupers = None
        self.mlps = None
        self.pool_method = 'max_pool'

    def call(self, xyz, features=None, new_xyz=None, training=True):
        r"""
        :param xyz: (B, N, 3) xyz coordinates of the points
        :param features: (B, N, C) descriptors of the points
        :param new_xyz: optional precomputed centroids
        :return:
            new_xyz: (B, npoint, 3) centroids
            new_features: (B, npoint, \sum_k(mlps[k][-1])) pooled descriptors
        """
        new_features_list = []

        if new_xyz is None and self.npoint is not None:
            # Pick npoint centroids via farthest-point sampling.
            sample_idx = tf.expand_dims(
                pointnet2_utils.furthest_point_sample(xyz, self.npoint),
                axis=-1)
            new_xyz = tf.gather_nd(xyz, sample_idx, batch_dims=1)

        for grouper, mlp in zip(self.groupers, self.mlps):
            grouped = grouper(xyz, new_xyz, features)  # (B, C, npoint, nsample)
            grouped = mlp(grouped, training=training)  # (B, mlp[-1], npoint, nsample)

            # Pool over the per-neighborhood sample axis.
            if self.pool_method == 'max_pool':
                pooled = tf.reduce_max(grouped, axis=-1)  # (B, mlp[-1], npoint)
            elif self.pool_method == 'avg_pool':
                pooled = tf.reduce_mean(grouped, axis=-1)  # (B, mlp[-1], npoint)
            else:
                raise NotImplementedError

            new_features_list.append(pooled)

        return new_xyz, tf.concat(new_features_list, axis=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
    """Pointnet set abstraction layer with multiscale grouping."""

    def __init__(self,
                 *,
                 npoint: int,
                 radii: List[float],
                 nsamples: List[int],
                 mlps: List[List[int]],
                 batch_norm=False,
                 use_xyz: bool = True,
                 pool_method='max_pool',
                 use_bias=False):
        """
        :param npoint: number of centroids (None -> group everything)
        :param radii: list of float, radius for each scale's ball query
        :param nsamples: list of int, samples per ball query, one per scale
        :param mlps: per-scale channel specs for the shared pointnet
        :param batch_norm: whether to use batchnorm in the MLPs
        :param use_xyz: concatenate grouped xyz to the features
        :param pool_method: max_pool / avg_pool
        :param use_bias: whether conv layers use a bias term
        """
        super().__init__()

        assert len(radii) == len(nsamples) == len(mlps)

        self.npoint = npoint
        self.groupers = []
        self.mlps = []
        for radius, nsample, mlp_spec in zip(radii, nsamples, mlps):
            if npoint is not None:
                grouper = pointnet2_utils.QueryAndGroup(radius, nsample,
                                                        use_xyz=use_xyz)
            else:
                grouper = pointnet2_utils.GroupAll(use_xyz)
            self.groupers.append(grouper)

            if use_xyz:
                # Grouped xyz is concatenated in front of the features.
                mlp_spec[0] += 3
            self.mlps.append(
                gen_CNN(mlp_spec,
                        conv=tf.keras.layers.Conv2D,
                        batch_norm=batch_norm,
                        use_bias=use_bias))

        self.pool_method = pool_method
class PointnetSAModule(PointnetSAModuleMSG):
    """Single-scale pointnet set abstraction layer.

    Thin convenience wrapper that forwards a single (radius, nsample, mlp)
    triple to the multi-scale implementation.
    """

    def __init__(self,
                 *,
                 mlp: List[int],
                 npoint: int = None,
                 radius: float = None,
                 nsample: int = None,
                 batch_norm=False,
                 use_xyz: bool = True,
                 pool_method='max_pool',
                 use_bias=False):
        """
        :param mlp: channel spec of the pointnet before the global max_pool
        :param npoint: number of centroids (None -> group everything)
        :param radius: radius of the ball query
        :param nsample: number of samples in the ball query
        :param batch_norm: whether to use batchnorm
        :param use_xyz: concatenate grouped xyz to the features
        :param pool_method: max_pool / avg_pool
        :param use_bias: whether conv layers use a bias term
        """
        super().__init__(mlps=[mlp], npoint=npoint, radii=[radius],
                         nsamples=[nsample], batch_norm=batch_norm,
                         use_xyz=use_xyz, pool_method=pool_method,
                         use_bias=use_bias)
MODEL._register_module(PointnetSAModule, 'tf')
class PointnetFPModule(tf.keras.layers.Layer):
    r"""Propagates the features of one point set to another (PointNet++ FP)."""

    def __init__(self, *, mlp: List[int], batch_norm=False, use_bias=False):
        """
        :param mlp: list of int, channel spec of the shared MLP
        :param batch_norm: whether to use batchnorm
        :param use_bias: whether conv layers use a bias term
        """
        super().__init__()
        self.mlp = gen_CNN(mlp,
                           conv=tf.keras.layers.Conv2D,
                           batch_norm=batch_norm,
                           use_bias=use_bias)

    def call(self, unknown, known, unknow_feats, known_feats, training=True):
        """
        :param unknown: (B, n, 3) xyz positions of the unknown features
        :param known: (B, m, 3) xyz positions of the known features
        :param unknow_feats: (B, C1, n) features to be propagated to
        :param known_feats: (B, C2, m) features to be propagated
        :return:
            new_features: (B, mlp[-1], n) features of the unknown points
        """
        if known is not None:
            # Inverse-distance weighted interpolation from the 3 nearest
            # known neighbors of each unknown point.
            dist, idx = pointnet2_utils.three_nn_gpu(unknown, known)
            dist_recip = 1.0 / (dist + 1e-8)  # epsilon guards zero distances
            norm = tf.reduce_sum(dist_recip, axis=2, keepdims=True)
            weight = dist_recip / norm
            interpolated_feats = pointnet2_utils.three_interpolate_gpu(
                known_feats, idx, weight)
        else:
            # BUGFIX: the original called `known_feats.expand(...)`, which is a
            # torch API and does not exist on TF tensors (AttributeError at
            # runtime). Broadcast the single known feature across all n points.
            interpolated_feats = tf.broadcast_to(
                known_feats,
                (known_feats.shape[0], known_feats.shape[1], unknown.shape[1]))

        if unknow_feats is not None:
            new_features = tf.concat([interpolated_feats, unknow_feats],
                                     axis=1)  # (B, C2 + C1, n)
        else:
            new_features = interpolated_feats

        new_features = tf.expand_dims(new_features, axis=-1)
        new_features = self.mlp(new_features, training=training)

        return tf.squeeze(new_features, axis=-1)
MODEL._register_module(PointnetFPModule, 'tf')
# Library module — nothing to run when executed directly.
if __name__ == "__main__":
    pass
| 36.520755 | 103 | 0.515189 | 9,323 | 0.963319 | 0 | 0 | 0 | 0 | 0 | 0 | 2,099 | 0.216884 |
63b69e49a561a7a7f6b1bf9ff48c5966ebd4c9f7 | 1,374 | py | Python | utils/config.py | PierreHao/YouGraph | ae8cf5d5bb544f64ee206bcba07ece66d49e00e3 | [
"MIT"
] | 18 | 2021-06-16T07:34:58.000Z | 2022-03-22T11:52:20.000Z | utils/config.py | SuperXiang/YouGraph | fe3dcc77ba901893f2ffa6ae8dd773320ac5f862 | [
"MIT"
] | 1 | 2021-06-16T17:04:33.000Z | 2021-06-22T18:53:08.000Z | utils/config.py | SuperXiang/YouGraph | fe3dcc77ba901893f2ffa6ae8dd773320ac5f862 | [
"MIT"
] | 4 | 2021-07-08T02:09:27.000Z | 2022-03-31T01:08:05.000Z | import argparse
import json
from easydict import EasyDict
def get_args():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-c', '--config',
metavar='C',
default=None,
help='The Configuration file')
argparser.add_argument(
'-i', '--id',
metavar='I',
default='',
help='The commit id)')
argparser.add_argument(
'-t', '--ts',
metavar='T',
default='',
help='The time stamp)')
argparser.add_argument(
'-d', '--dir',
metavar='D',
default='',
help='The output directory)')
args = argparser.parse_args()
return args
def get_config_from_json(json_file):
    """Load a JSON configuration file as an attribute-accessible dict.

    :param json_file: path to the configs JSON file
    :return: EasyDict wrapping the parsed configuration
    """
    with open(json_file, 'r') as config_file:
        raw_config = json.load(config_file)
    # EasyDict allows both config["key"] and config.key access.
    return EasyDict(raw_config)
def process_config(args):
    """Build the run configuration from parsed CLI arguments.

    Loads the JSON file referenced by ``args.config`` and attaches the
    run metadata (commit id, time stamp, output directory) to it.
    """
    config = get_config_from_json(args.config)
    for attr, value in (("commit_id", args.id),
                        ("time_stamp", args.ts),
                        ("directory", args.dir)):
        setattr(config, attr, value)
    return config
if __name__ == '__main__':
    # Smoke test: load a sample config and print one sub-configuration.
    config = get_config_from_json('../configs/MUTAG.json')
    sub_configurations = config.configurations
    print(sub_configurations['pooling'])
| 24.981818 | 66 | 0.636099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 305 | 0.22198 |
63b74e1885d1242860e5b1d4ed405f1ff5699ee2 | 1,887 | py | Python | data_prep/train_val_test_splitter.py | GIT-chandra/DeReflectionNet | e214f8574c5d018a8568145cbbe7a78b41026c3b | [
"MIT"
] | null | null | null | data_prep/train_val_test_splitter.py | GIT-chandra/DeReflectionNet | e214f8574c5d018a8568145cbbe7a78b41026c3b | [
"MIT"
] | 1 | 2019-03-04T06:25:16.000Z | 2019-03-13T09:29:11.000Z | data_prep/train_val_test_splitter.py | GIT-chandra/DeReflectionNet | e214f8574c5d018a8568145cbbe7a78b41026c3b | [
"MIT"
] | null | null | null | import shutil
import numpy as np
ALL_SYNTHS_LIST = 'synth_imgs.txt'
TRAIN_IMAGES_LIST = 'train_imgs.txt'
VAL_IMAGES_LIST = 'val_imgs.txt'
TEST_IMAGES_LIST = 'test_imgs.txt'

TRAIN_STOP = 342000
VAL_STOP = TRAIN_STOP + 38000

'''
390000 examples : 342000 train and 38000 val (90/10 splits on 380000), 10000 test
'''


def _copy_split(files, start, stop, list_path, dest_dir, split_name):
    """Copy examples [start, stop) plus their _r/_b companion images into
    dest_dir and record the copied base images in list_path.

    :param files: array of source image paths (already shuffled)
    :param start, stop: slice of ``files`` belonging to this split
    :param list_path: text file receiving one dest path per example
    :param dest_dir: destination directory (with trailing slash)
    :param split_name: label used in the progress messages
    """
    total = stop - start
    with open(list_path, "w") as list_file:
        for i in range(start, stop):
            base = files[i]
            shutil.copy(base, dest_dir)
            # Each example has reflection (_r) and background (_b) companions.
            shutil.copy(base[:-4] + "_r.jpg", dest_dir)
            shutil.copy(base[:-4] + "_b.jpg", dest_dir)
            fname = base.split('/')[-1]
            list_file.write(dest_dir + fname + '\n')
            # BUGFIX: the original printed the absolute index against a
            # hard-coded (and for val/test wrong) total, e.g. "342005/38000".
            print("Copying %s examples ...%d/%d"
                  % (split_name, i - start + 1, total))


with open(ALL_SYNTHS_LIST, 'r') as img_list:
    files = np.array(img_list.read().splitlines())

# Shuffle once so the three splits are disjoint random subsets.
files = files[np.random.permutation(files.shape[0])]

_copy_split(files, 0, TRAIN_STOP, TRAIN_IMAGES_LIST, './train_imgs/', 'training')
_copy_split(files, TRAIN_STOP, VAL_STOP, VAL_IMAGES_LIST, './val_imgs/', 'validation')
_copy_split(files, VAL_STOP, files.shape[0], TEST_IMAGES_LIST, './test_imgs/', 'testing')
| 34.944444 | 82 | 0.606783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 519 | 0.27504 |
63b7a45ec105c6ba1c889efaafdedd671e56a63a | 803 | py | Python | example/fastapi/models.py | cnjlq84/oidc-op | c7ab007327cc1a5e69abba7699c0be5f29534049 | [
"Apache-2.0"
] | 31 | 2020-09-15T21:18:05.000Z | 2022-02-17T02:50:04.000Z | example/fastapi/models.py | cnjlq84/oidc-op | c7ab007327cc1a5e69abba7699c0be5f29534049 | [
"Apache-2.0"
] | 106 | 2021-03-26T17:12:54.000Z | 2022-03-11T07:19:46.000Z | example/fastapi/models.py | cnjlq84/oidc-op | c7ab007327cc1a5e69abba7699c0be5f29534049 | [
"Apache-2.0"
] | 13 | 2020-02-12T16:31:01.000Z | 2022-03-03T09:54:44.000Z | from typing import List
from typing import Optional
from pydantic import BaseModel
class WebFingerRequest(BaseModel):
    """Payload of an OIDC WebFinger discovery request."""

    # `rel` defaults to the OpenID Connect issuer relation URI.
    rel: Optional[str] = 'http://openid.net/specs/connect/1.0/issuer'
    resource: str
class AuthorizationRequest(BaseModel):
    """Payload of an OIDC authorization request.

    Required fields: client_id, redirect_uri, response_type, scope;
    everything else is optional per the OIDC core specification.
    """

    acr_values: Optional[List[str]]
    claims: Optional[dict]
    claims_locales: Optional[List[str]]
    client_id: str
    display: Optional[str]
    id_token_hint: Optional[str]
    login_hint: Optional[str]
    max_age: Optional[int]
    nonce: Optional[str]
    prompt: Optional[List[str]]
    redirect_uri: str
    registration: Optional[dict]
    request: Optional[str]
    request_uri: Optional[str]
    response_mode: Optional[str]
    response_type: List[str]
    scope: List[str]
    state: Optional[str]
    ui_locales: Optional[List[str]]
63b7c7a23f4c13a732afba3bcbd544198bdcf67b | 3,957 | py | Python | fnet/functions.py | HelmholtzAI-Consultants-Munich/pytorch_fnet | 879784bd0f8e76ab8f0ed8de4235180a316e12d8 | [
"Unlicense"
] | null | null | null | fnet/functions.py | HelmholtzAI-Consultants-Munich/pytorch_fnet | 879784bd0f8e76ab8f0ed8de4235180a316e12d8 | [
"Unlicense"
] | null | null | null | fnet/functions.py | HelmholtzAI-Consultants-Munich/pytorch_fnet | 879784bd0f8e76ab8f0ed8de4235180a316e12d8 | [
"Unlicense"
] | null | null | null | import importlib
import json
import os
import pdb
import sys

import numpy as np
import pandas as pd
import tifffile
import torch

import fnet
from fnet.transforms import normalize
def pearson_loss(x, y):
    """Pearson correlation between two tensors.

    BUGFIX: this function uses ``torch``, which was never imported in this
    module, so every call raised NameError (fixed at the import block).

    NOTE: despite the name this returns the correlation itself (+1 for a
    perfect match), not a minimizable loss — negate it before using it as a
    training objective.

    :param x: prediction tensor
    :param y: target tensor
    :return: scalar tensor with the Pearson correlation of x and y
    """
    vx = x - torch.mean(x)
    vy = y - torch.mean(y)
    cost = torch.sum(vx * vy) / (
        torch.sqrt(torch.sum(vx ** 2)) * torch.sqrt(torch.sum(vy ** 2)))
    return cost
# code retrieved on 21.05.21 from: https://github.com/pytorch/pytorch/issues/1254
def pearsonr(x, y):
    """Pearson correlation coefficient of two tensors (scipy.stats.pearsonr
    mimic).

    Both inputs are detached, moved to host memory and flattened first, so no
    gradients flow through this function.

    :param x: prediction tensor (any shape)
    :param y: target tensor (same number of elements as x)
    :return: float correlation coefficient
    """
    pred = x.detach().cpu().numpy().flatten()
    target = y.detach().cpu().numpy().flatten()
    # corrcoef returns the 2x2 correlation matrix; the off-diagonal entry is r.
    return np.corrcoef(pred, target)[0, 1]
def load_model(path_model, gpu_ids=0, module='fnet_model', in_channels=1, out_channels=1):
    """Instantiate a model from the given fnet submodule and restore weights.

    :param path_model: saved state file, or a directory containing 'model.p'
    :param gpu_ids: GPU id(s) forwarded to load_state
    :param module: fnet submodule providing the Model class
    :param in_channels, out_channels: model channel configuration
    :return: the restored model
    """
    model_module = importlib.import_module('fnet.' + module)
    if os.path.isdir(path_model):
        path_model = os.path.join(path_model, 'model.p')
    model = model_module.Model(in_channels=in_channels, out_channels=out_channels)
    model.load_state(path_model, gpu_ids=gpu_ids)
    return model
def load_model_from_dir(path_model_dir, gpu_ids=0, in_channels=1, out_channels=1):
    """Restore an fnet model from a directory containing 'model.p'.

    :param path_model_dir: directory with the saved state file
    :param gpu_ids: GPU id(s) forwarded to load_state
    :param in_channels, out_channels: model channel configuration
    :return: the restored model
    """
    assert os.path.isdir(path_model_dir)
    state_path = os.path.join(path_model_dir, 'model.p')
    model = fnet.fnet_model.Model(in_channels=in_channels, out_channels=out_channels)
    model.load_state(state_path, gpu_ids=gpu_ids)
    return model
def compute_dataset_min_max_ranges(train_path, val_path=None, norm=False):
    """Scan dataset CSV(s) and return global intensity ranges per channel.

    :param train_path: CSV with columns file / signal_channel /
        target_channel / dapi_channel
    :param val_path: optional second CSV appended to the scan
    :param norm: if True, normalize the brightfield image before its range
        is taken
    :return: ([min, max] brightfield, [min, max] target, [min, max] dapi);
        target/dapi entries are [None, None] when the channel is absent
    """
    frames = [pd.read_csv(train_path)]
    if val_path is not None:
        frames.append(pd.read_csv(val_path))
    df = pd.concat(frames)

    # Channel availability is decided from the first row only.
    # NOTE(review): this uses an `is None` check, but missing CSV values are
    # typically NaN after read_csv — confirm against the CSVs actually used.
    no_target = df.iloc[0, :]['target_channel'] is None
    no_dapi = df.iloc[0, :]['dapi_channel'] is None

    bright_mins, bright_maxs = [], []
    target_mins, target_maxs = [], []
    dapi_mins, dapi_maxs = [], []

    for index in range(len(df)):
        row = df.iloc[index, :]
        image = tifffile.imread(row['file'])

        if not no_target:
            target_img = image[row['target_channel'], :, :]
            target_mins.append(np.min(target_img))
            target_maxs.append(np.max(target_img))
        if not no_dapi:
            dapi_img = image[row['dapi_channel'], :, :]
            dapi_mins.append(np.min(dapi_img))
            dapi_maxs.append(np.max(dapi_img))

        bright_img = image[row['signal_channel'], :, :]
        if norm:
            bright_img = normalize(bright_img)
        bright_mins.append(np.min(bright_img))
        bright_maxs.append(np.max(bright_img))

    min_inf = np.min(np.array(target_mins)) if not no_target else None
    max_inf = np.max(np.array(target_maxs)) if not no_target else None
    min_dapi = np.min(np.array(dapi_mins)) if not no_dapi else None
    max_dapi = np.max(np.array(dapi_maxs)) if not no_dapi else None
    min_bright = np.min(np.array(bright_mins))
    max_bright = np.max(np.array(bright_maxs))

    return [min_bright, max_bright], [min_inf, max_inf], [min_dapi, max_dapi]
63b7ef516ba9eb132540a0353455cfa47e2e1ffb | 3,151 | py | Python | scrape_mars.py | neilgbp/MissiontoMarsWebScrapingHW | 4afaf6a30cb03111ed53645afaedd3081cb7bbb7 | [
"ADSL"
] | null | null | null | scrape_mars.py | neilgbp/MissiontoMarsWebScrapingHW | 4afaf6a30cb03111ed53645afaedd3081cb7bbb7 | [
"ADSL"
] | null | null | null | scrape_mars.py | neilgbp/MissiontoMarsWebScrapingHW | 4afaf6a30cb03111ed53645afaedd3081cb7bbb7 | [
"ADSL"
] | null | null | null | import os
import pandas as pd
import pymongo
import requests
import time
from splinter import Browser
from bs4 import BeautifulSoup
from selenium import webdriver
# NOTE(review): debug leftover — prints the driver path as a side effect every
# time this module is imported; consider removing.
print(os.path.abspath("chromedriver.exe"))
def init_browser():
    """Create a non-headless Chrome splinter Browser using the bundled
    chromedriver executable next to this script."""
    driver_path = os.path.abspath("chromedriver.exe")
    return Browser("chrome", executable_path=driver_path, headless=False)
def _scrape_news(browser):
    """Return (headline, teaser) of the latest NASA Mars news article."""
    browser.visit("https://mars.nasa.gov/news/")
    soup = BeautifulSoup(browser.html, "html.parser")
    title = soup.find("div", class_="content_title").text
    teaser = soup.find("div", class_="article_teaser_body").text
    return title, teaser


def _scrape_featured_image(browser):
    """Return the full-size URL of the current JPL featured Mars image."""
    browser.visit("https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars")
    browser.click_link_by_partial_text('FULL IMAGE')
    time.sleep(3)  # give the image lightbox time to load before navigating on
    browser.click_link_by_partial_text('more info')
    soup = BeautifulSoup(browser.html, "html.parser")
    img_path = soup.find('figure', class_='lede').a['href']
    return "https://www.jpl.nasa.gov" + img_path


def _scrape_weather(browser):
    """Return the text of the most recent Mars weather tweet."""
    browser.visit("https://twitter.com/marswxreport?lang=en")
    soup = BeautifulSoup(browser.html, 'html.parser')
    return soup.find(
        'p',
        class_="TweetTextSize TweetTextSize--normal js-tweet-text tweet-text").text


def _scrape_facts_table(browser):
    """Return the Mars facts table rendered as an HTML snippet."""
    browser.visit("https://space-facts.com/mars/")
    soup = BeautifulSoup(browser.html, 'html.parser')
    table_data = soup.find('table', class_="tablepress tablepress-id-mars")
    labels, values = [], []
    for tr in table_data.find_all('tr'):
        cells = tr.find_all('td')
        labels.append(cells[0].text)
        values.append(cells[1].text)
    mars_df = pd.DataFrame({"Label": labels, "Values": values})
    return mars_df.to_html(header=False, index=False)


def _scrape_hemispheres(browser):
    """Return [{title, img_url}, ...] for the USGS Mars hemisphere images."""
    browser.visit("https://astrogeology.usgs.gov/search/results"
                  "?q=hemisphere+enhanced&k1=target&v1=Mars")
    soup = BeautifulSoup(browser.html, "html.parser")
    results = soup.find("div", class_="collapsible results")
    hemispheres = []
    for item in results.find_all("div", class_="item"):
        title = item.find("h3").text
        # Follow the item's detail page to find the full-size download link.
        detail_url = "https://astrogeology.usgs.gov" + item.find("a")["href"]
        browser.visit(detail_url)
        detail_soup = BeautifulSoup(browser.html, "html.parser")
        downloads = detail_soup.find("div", class_="downloads")
        hemispheres.append({"title": title,
                            "img_url": downloads.find("a")["href"]})
    return hemispheres


def scrape():
    """Scrape every Mars data source and return the results in one dict.

    Decomposed from a single long function into one helper per source;
    the returned keys and their contents are unchanged.
    """
    browser = init_browser()
    mars_title, mars_p = _scrape_news(browser)
    # Dict values are evaluated in order, preserving the original scrape order.
    return {
        "mars_title": mars_title,
        "mars_p": mars_p,
        "feat_img_url": _scrape_featured_image(browser),
        "mars_weather": _scrape_weather(browser),
        "facts_table": _scrape_facts_table(browser),
        "hemi_images": _scrape_hemispheres(browser),
    }
# Run a full scrape and dump the result when executed as a script.
if __name__ == "__main__":
    print(scrape())
63b9f24ae6a49d0ba7a1154d3162616044c52214 | 3,989 | py | Python | indy_node/test/rich_schema/test_rich_schemas_disabled_by_default.py | Rob-S/indy-node | 0aefbda62c5a7412d7e03b2fb9795c500ea67e9f | [
"Apache-2.0"
] | 627 | 2017-07-06T12:38:08.000Z | 2022-03-30T13:18:43.000Z | indy_node/test/rich_schema/test_rich_schemas_disabled_by_default.py | Rob-S/indy-node | 0aefbda62c5a7412d7e03b2fb9795c500ea67e9f | [
"Apache-2.0"
] | 580 | 2017-06-29T17:59:57.000Z | 2022-03-29T21:37:52.000Z | indy_node/test/rich_schema/test_rich_schemas_disabled_by_default.py | Rob-S/indy-node | 0aefbda62c5a7412d7e03b2fb9795c500ea67e9f | [
"Apache-2.0"
] | 704 | 2017-06-29T17:45:34.000Z | 2022-03-30T07:08:58.000Z | import json
import pytest
from indy_common.constants import JSON_LD_CONTEXT, RS_CONTEXT_TYPE_VALUE, RS_ID, GET_RICH_SCHEMA_OBJECT_BY_ID, \
GET_RICH_SCHEMA_OBJECT_BY_METADATA, RS_NAME, RS_VERSION, RS_TYPE
from indy_node.test.api.helper import sdk_build_rich_schema_request, sdk_write_rich_schema_object_and_check
from indy_node.test.helper import rich_schemas_enabled_scope
from indy_node.test.rich_schema.templates import W3C_BASE_CONTEXT
from indy_node.test.rich_schema.test_send_get_rich_schema_obj import PARAMS
from indy_node.test.state_proof.helper import sdk_submit_operation_and_get_result
from plenum.common.constants import TXN_TYPE
from plenum.common.exceptions import RequestNackedException
from plenum.common.util import randomString
from plenum.test.helper import sdk_sign_and_submit_req, sdk_get_and_check_replies
@pytest.fixture(scope='module')
def write_rich_schema(looper, sdk_pool_handle, sdk_wallet_endorser, tconf):
    # Pre-populate the ledger with every rich-schema object from PARAMS,
    # temporarily enabling rich-schema transactions just for these writes.
    with rich_schemas_enabled_scope(tconf):
        for txn_type, rs_type, content, rs_id, rs_name, rs_version in PARAMS:
            sdk_write_rich_schema_object_and_check(
                looper, sdk_wallet_endorser, sdk_pool_handle,
                txn_type=txn_type, rs_id=rs_id, rs_name=rs_name,
                rs_version=rs_version, rs_type=rs_type, rs_content=content)
@pytest.mark.parametrize('txn_type, rs_type, content, rs_id',
                         [(JSON_LD_CONTEXT, RS_CONTEXT_TYPE_VALUE, W3C_BASE_CONTEXT, randomString())])
def test_send_rich_schema_obj_disabled_by_default(looper, sdk_pool_handle, sdk_wallet_endorser, txn_type, rs_type,
                                                  content, rs_id):
    # Writes must be nacked while rich-schema transactions are disabled
    # (which is the default configuration).
    request = sdk_build_rich_schema_request(
        looper, sdk_wallet_endorser, txn_type,
        rs_id=rs_id, rs_name=randomString(), rs_version='1.0',
        rs_type=rs_type, rs_content=json.dumps(content))
    req = sdk_sign_and_submit_req(sdk_pool_handle, sdk_wallet_endorser, request)
    with pytest.raises(RequestNackedException, match='RichSchema transactions are disabled'):
        sdk_get_and_check_replies(looper, [req])
@pytest.mark.parametrize('txn_type, rs_type, content, rs_id, rs_name, rs_version', PARAMS)
def test_send_get_rich_schema_obj_by_id_disabled_by_default(looper, sdk_pool_handle, sdk_wallet_endorser, txn_type,
                                                            rs_type, content, rs_id, rs_name, rs_version,
                                                            write_rich_schema):
    # Lookups by id must be nacked while rich-schema queries are disabled,
    # even though the objects exist on the ledger (write_rich_schema fixture).
    operation = {
        TXN_TYPE: GET_RICH_SCHEMA_OBJECT_BY_ID,
        RS_ID: rs_id,
    }
    with pytest.raises(RequestNackedException, match='RichSchema queries are disabled'):
        sdk_submit_operation_and_get_result(looper, sdk_pool_handle,
                                            sdk_wallet_endorser, operation)
@pytest.mark.parametrize('txn_type, rs_type, content, rs_id, rs_name, rs_version', PARAMS)
def test_send_get_rich_schema_obj_by_metadata_disabled_by_default(looper, sdk_pool_handle, sdk_wallet_endorser,
                                                                  txn_type, rs_type, content, rs_id, rs_name,
                                                                  rs_version, write_rich_schema):
    # Lookups by (name, version, type) must be nacked while rich-schema
    # queries are disabled.
    operation = {
        TXN_TYPE: GET_RICH_SCHEMA_OBJECT_BY_METADATA,
        RS_NAME: rs_name,
        RS_VERSION: rs_version,
        RS_TYPE: rs_type,
    }
    with pytest.raises(RequestNackedException, match='RichSchema queries are disabled'):
        sdk_submit_operation_and_get_result(looper, sdk_pool_handle,
                                            sdk_wallet_endorser, operation)
| 55.402778 | 115 | 0.670093 | 0 | 0 | 0 | 0 | 3,143 | 0.787917 | 0 | 0 | 264 | 0.066182 |
63bc76920a70004a6435e13b986e5d039044a30a | 613 | py | Python | __about__.py | a232319779/python_mmdt | 66458e08bbf6cc5961a3e4134f0969aaa4f21b71 | [
"MIT"
] | 5 | 2021-02-01T07:23:15.000Z | 2022-02-21T11:05:50.000Z | __about__.py | a232319779/python_mmdt | 66458e08bbf6cc5961a3e4134f0969aaa4f21b71 | [
"MIT"
] | null | null | null | __about__.py | a232319779/python_mmdt | 66458e08bbf6cc5961a3e4134f0969aaa4f21b71 | [
"MIT"
] | 1 | 2021-02-01T07:23:17.000Z | 2021-02-01T07:23:17.000Z | # -*- coding: utf-8 -*-
# @Time : 2021/01/05 22:53:23
# @Author : ddvv
# @Site : https://ddvvmmzz.github.io
# @File : about.py
# @Software: Visual Studio Code
# Public package metadata, re-exported via `from __about__ import *`.
__all__ = [
    "__author__",
    "__copyright__",
    "__email__",
    "__license__",
    "__summary__",
    "__title__",
    "__uri__",
    "__version__",
]

__title__ = "python_mmdt"
__summary__ = "Python wrapper for the mmdt library"
__uri__ = "https://github.com/a232319779/python_mmdt"
__version__ = "0.2.3"

__author__ = "ddvv"
__email__ = "dadavivi512@gmail.com"

__license__ = "MIT"
__copyright__ = "Copyright 2021 " + __author__
63bce965f7a1533a26e799f32a00a0a7b1005f81 | 135 | py | Python | img_transforms.py | martinpflaum/image-augmentation-with-point-clouds | 453947520bd74a0b7ae959c1b59e9776b9dfe7a2 | [
"MIT"
] | null | null | null | img_transforms.py | martinpflaum/image-augmentation-with-point-clouds | 453947520bd74a0b7ae959c1b59e9776b9dfe7a2 | [
"MIT"
] | null | null | null | img_transforms.py | martinpflaum/image-augmentation-with-point-clouds | 453947520bd74a0b7ae959c1b59e9776b9dfe7a2 | [
"MIT"
] | null | null | null | import random
from torchvision.transforms import functional as F
from torchvision.transforms import transforms
from PIL import Image | 33.75 | 51 | 0.851852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
63bd9e9b810c61a3a53d293b5e2aa48bf52f2c4e | 13,332 | py | Python | sourse/ui/modules/current_settings.py | LeaveMyYard/BitmexGridTrader | e7b8c4b1bfdc3dff6b02c3492ced9fb086e1519a | [
"MIT"
] | 28 | 2020-08-20T13:17:51.000Z | 2022-03-10T19:41:41.000Z | sourse/ui/modules/current_settings.py | LeaveMyYard/BitmexGridTrader | e7b8c4b1bfdc3dff6b02c3492ced9fb086e1519a | [
"MIT"
] | 5 | 2020-09-01T03:14:40.000Z | 2022-01-01T11:39:00.000Z | sourse/ui/modules/current_settings.py | LeaveMyYard/BitmexGridTrader | e7b8c4b1bfdc3dff6b02c3492ced9fb086e1519a | [
"MIT"
] | 16 | 2020-11-02T10:35:11.000Z | 2021-12-08T14:05:24.000Z | from __future__ import annotations
from sourse.ui.modules.base_qdockwidget_module import BaseUIModule
from PyQt5 import QtWidgets, QtCore, QtGui
from sourse.marketmaker import MarketMaker
import typing
import json
class InputFormat:
    """Description of how to build the input widget for one settings field.

    Bundles the Qt widget class, a display name, a human-readable description
    and a mapping of configuration calls (method name -> argument tuple) to
    apply to the widget after construction.
    """

    def __init__(
        self,
        widget_type: typing.Type[QtWidgets.QWidget],
        name: str,
        desc: str,
        params: typing.Dict[str, typing.Tuple],
    ):
        self.widget_type = widget_type
        self.name = name
        self.desc = desc
        self.params = params
class CurrentSettingsModule(BaseUIModule):
    """Dock-widget panel with API keys, algorithm settings and bot controls."""

    templates_updated = QtCore.pyqtSignal()
    # Emitted on every user edit of an algorithm setting widget.
    settings_changed = QtCore.pyqtSignal()
    # Emitted by the start/stop toggle button.
    start_button_pressed = QtCore.pyqtSignal()
    stop_button_pressed = QtCore.pyqtSignal()
    # Cleanup actions requested when stopping with open orders / a position.
    cancel_all_orders = QtCore.pyqtSignal()
    fill_position = QtCore.pyqtSignal()
def __init__(
self,
parent: QtWidgets.QDockWidget,
marketmaker_finished_predicate: typing.Callable[[], typing.Tuple[bool, bool]],
):
self._setting_widgets: typing.Dict[str, QtWidgets.QWidget] = {}
self._keys_widgets: typing.Dict[str, typing.Optional[QtWidgets.QtWidget]] = {
"private": None,
"public": None,
}
self._marketmaker_finished_predicate = marketmaker_finished_predicate
super().__init__(parent)
self.base_widget.installEventFilter(self)
def eventFilter(self, sourse, event):
if isinstance(event, QtGui.QResizeEvent):
if event.size().height() <= 1080:
self.hide_labels()
else:
self.show_labels()
return True
def hide_labels(self):
for label in self._settings_descriptions:
label.setVisible(False)
def show_labels(self):
for label in self._settings_descriptions:
label.setVisible(True)
@staticmethod
def _get_settings_description() -> typing.Dict[str, InputFormat]:
d: typing.Dict[str, InputFormat] = {}
settings_data = json.load(open("settings.json", "r"))["settings_description"]
for i in MarketMaker.Settings.__dataclass_fields__.keys():
data = settings_data[i]
name = data["name"]
desc = data["desc"]
type_data = data["type_data"]
params: typing.Dict[str, typing.Tuple] = {}
if type_data["type"] == "int":
widget = QtWidgets.QSpinBox
params["setMinimum"] = (type_data["minimum"],)
params["setMaximum"] = (type_data["maximum"],)
if "suffix" in type_data:
params["setSuffix"] = (type_data["suffix"],)
elif type_data["type"] == "float":
widget = QtWidgets.QDoubleSpinBox
params["setMinimum"] = (type_data["minimum"],)
params["setMaximum"] = (type_data["maximum"],)
params["setDecimals"] = (type_data["decimals"],)
params["setSingleStep"] = (type_data["step"],)
if "suffix" in type_data:
params["setSuffix"] = (type_data["suffix"],)
elif type_data["type"] == "bool":
widget = QtWidgets.QCheckBox
else:
raise ValueError
d[i] = InputFormat(widget, name, desc, params)
return d
def _create_widgets(self):
self.layout = QtWidgets.QVBoxLayout(self.base_widget)
self.parent_widget.setWindowTitle("Current Settings")
self.base_widget.setLayout(self.layout)
self._settings_descriptions: typing.List[QtWidgets.QLabel] = []
self.layout.addWidget(self._create_keys_groupbox())
self.layout.addWidget(self._create_algorithm_groupbox())
self.layout.addWidget(self._create_template_saving_groupbox())
self.layout.addWidget(self._create_start_stop_button())
for i, v in enumerate([0, 1, 0]):
self.layout.setStretch(i, v)
def _create_start_stop_button(self) -> QtWidgets.QPushButton:
button = QtWidgets.QPushButton("Start bot")
running: bool = False
@QtCore.pyqtSlot()
def on_pressed():
nonlocal running
if running:
self.check_bot_finish_actions()
button.setText("Start bot")
self.stop_button_pressed.emit()
else:
button.setText("Stop bot")
self.start_button_pressed.emit()
running = not running
button.pressed.connect(on_pressed)
return button
def check_bot_finish_actions(self):
(position_filled, orders_canceled,) = self._marketmaker_finished_predicate()
if not position_filled or not orders_canceled:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
msg.setWindowTitle("The bot is unfinished")
msg.setText("The bot work in unfinished, choose what to do")
stop_button = msg.addButton("Just stop", QtWidgets.QMessageBox.AcceptRole)
cancel_orders = None
fill_position = None
both_actions = None
if not position_filled and not orders_canceled:
msg.setInformativeText(
f"Right now it seems that there are some opened orders and unrealised position."
)
cancel_orders = msg.addButton(
"Cancel orders", QtWidgets.QMessageBox.AcceptRole,
)
fill_position = msg.addButton(
"Fill position", QtWidgets.QMessageBox.AcceptRole,
)
both_actions = msg.addButton(
"Do both", QtWidgets.QMessageBox.AcceptRole,
)
elif not position_filled:
msg.setInformativeText(
f"Right now it seems that there is unrealised position."
)
fill_position = msg.addButton(
"Fill position", QtWidgets.QMessageBox.AcceptRole,
)
elif not orders_canceled:
msg.setInformativeText(
f"Right now it seems that there are some opened orders."
)
cancel_orders = msg.addButton(
"Cancel orders", QtWidgets.QMessageBox.AcceptRole,
)
cancel_button = msg.addButton("Cancel", QtWidgets.QMessageBox.RejectRole)
msg.exec()
if msg.clickedButton() == cancel_button:
return
elif msg.clickedButton() == cancel_orders:
self.cancel_all_orders.emit()
elif msg.clickedButton() == fill_position:
self.fill_position.emit()
elif msg.clickedButton() == both_actions:
self.cancel_all_orders.emit()
self.fill_position.emit()
def _create_keys_groupbox(self) -> QtWidgets.QGroupBox:
group_box = QtWidgets.QGroupBox("Keys")
layout = QtWidgets.QFormLayout(group_box)
settings_data = json.load(open("settings.json", "r"))["bitmex_client"]
label = QtWidgets.QLabel("Public key:")
widget = QtWidgets.QLineEdit()
widget.setText(settings_data["public_key"])
layout.addRow(label, widget)
self._keys_widgets["public"] = widget
label = QtWidgets.QLabel("Private key:")
widget = QtWidgets.QLineEdit()
widget.setText(settings_data["private_key"])
layout.addRow(label, widget)
self._keys_widgets["private"] = widget
return group_box
def _create_algorithm_groupbox(self) -> QtWidgets.QGroupBox:
group_box = QtWidgets.QGroupBox("Algorithm")
layout = QtWidgets.QFormLayout(group_box)
settings_format: typing.Dict[
str, InputFormat
] = self._get_settings_description()
for name, value in settings_format.items():
label = QtWidgets.QLabel(value.name + ":")
widget = value.widget_type(group_box)
if value.widget_type is QtWidgets.QCheckBox:
widget.stateChanged.connect(self.settings_changed)
else:
widget.valueChanged.connect(self.settings_changed)
self._setting_widgets[name] = widget
for func_name, params in value.params.items():
widget.__getattribute__(func_name)(*params)
comment = QtWidgets.QLabel(f"<i>{value.desc}</i><br>")
comment.setTextFormat(QtCore.Qt.RichText)
comment.setWordWrap(True)
self._settings_descriptions.append(comment)
vlayout = QtWidgets.QVBoxLayout()
vlayout.addWidget(widget)
vlayout.addWidget(comment)
layout.addRow(label, vlayout)
return group_box
def _create_template_saving_groupbox(self) -> QtWidgets.QGroupBox:
    """Build the "Save template" group box.

    Contains name/description inputs plus a "Save" button.  The button is
    enabled only while both inputs are non-empty; pressing it stores the
    current algorithm settings under ``templates`` in ``settings.json``
    (asking for confirmation before overwriting an existing template) and
    emits ``templates_updated``.
    """
    group_box = QtWidgets.QGroupBox("Save template")
    outer_vlayout = QtWidgets.QVBoxLayout(group_box)
    layout = QtWidgets.QFormLayout()
    outer_vlayout.addLayout(layout)
    label = QtWidgets.QLabel("Template name:")
    vlayout = QtWidgets.QVBoxLayout()
    name_input = QtWidgets.QLineEdit()
    name_label = QtWidgets.QLabel("<i>The name of your template</i><br>")
    name_label.setWordWrap(True)
    vlayout.addWidget(name_input)
    vlayout.addWidget(name_label)
    layout.addRow(label, vlayout)
    label = QtWidgets.QLabel("Template description:")
    vlayout = QtWidgets.QVBoxLayout()
    desc_input = QtWidgets.QLineEdit()
    desc_label = QtWidgets.QLabel("<i>The description of your template</i><br>")
    desc_label.setWordWrap(True)
    vlayout.addWidget(desc_input)
    vlayout.addWidget(desc_label)
    layout.addRow(label, vlayout)
    save_button = QtWidgets.QPushButton("Save")
    save_button.setDisabled(True)  # enabled once both fields are filled
    outer_vlayout.addWidget(save_button)
    # Keep the save button enabled only while both inputs are non-empty.
    name_input.textChanged.connect(
        lambda name: save_button.setEnabled(name != "" and desc_input.text() != "")
    )
    desc_input.textChanged.connect(
        lambda name: save_button.setEnabled(name != "" and name_input.text() != "")
    )

    @QtCore.pyqtSlot()
    def save_template():
        """Persist the current settings as a named template (closure slot)."""
        name = name_input.text()
        desc = desc_input.text()
        settings = json.load(open("./settings.json", "r"))
        if name in settings["templates"]:
            # Ask before overwriting an existing template.
            msg = QtWidgets.QMessageBox()
            msg.setIcon(QtWidgets.QMessageBox.Question)
            # msg.setIconPixmap(pixmap)  # custom icon (disabled)
            msg.setWindowTitle("Template already exists")
            msg.setText(f'Template called "{name}" already exists.')
            msg.setInformativeText(
                "The template, you wanted to create already exists. Do you want to replace it?"
            )
            okButton = msg.addButton("Yes", QtWidgets.QMessageBox.AcceptRole)
            msg.addButton("No", QtWidgets.QMessageBox.RejectRole)
            msg.exec()
            if msg.clickedButton() != okButton:
                return
        # Snapshot every settings widget: checked state for check boxes,
        # value() for numeric widgets.
        settings["templates"][name] = dict(
            desc=desc,
            **{
                name: (
                    widget.value()
                    if widget.__class__ is not QtWidgets.QCheckBox
                    else widget.checkState() == QtCore.Qt.Checked
                )
                for name, widget in self._setting_widgets.items()
            },
        )
        with open("./settings.json", "w") as f:
            json.dump(settings, f)
        self.templates_updated.emit()

    save_button.pressed.connect(save_template)
    return group_box
@QtCore.pyqtSlot()
def on_template_loaded(self, name: str, template: MarketMaker.Settings):
    """Copy every value from a loaded template into the settings widgets.

    Check boxes are driven via their check state; every other widget via
    ``setValue``.
    """
    for setting_name, widget in self._setting_widgets.items():
        value = getattr(template, setting_name)
        if type(widget) is QtWidgets.QCheckBox:
            state = QtCore.Qt.Checked if value else QtCore.Qt.Unchecked
            widget.setCheckState(state)
        else:
            widget.setValue(value)
def get_current_settings(self) -> MarketMaker.Settings:
    """Collect current widget values into a ``MarketMaker.Settings``.

    Check boxes contribute booleans (checked state); every other widget
    contributes its ``value()``.
    """
    return MarketMaker.Settings(
        **{
            name: (
                widget.value()
                if widget.__class__ is not QtWidgets.QCheckBox
                else widget.checkState() == QtCore.Qt.Checked
            )
            for name, widget in self._setting_widgets.items()
        }
    )
def get_current_keys(self) -> typing.Tuple[str, str]:
    """Return the (public, private) API keys currently entered.

    Raises:
        RuntimeError: If either key widget has not been created yet.
    """
    public_widget = self._keys_widgets["public"]
    private_widget = self._keys_widgets["private"]
    if public_widget is None or private_widget is None:
        # Same exception type as before, but now it explains what failed.
        raise RuntimeError("Key input widgets have not been initialized")
    return public_widget.text(), private_widget.text()
| 35.93531 | 100 | 0.586559 | 13,122 | 0.983363 | 0 | 0 | 4,016 | 0.300959 | 0 | 0 | 1,305 | 0.097797 |
63be5f6fc2edffc16d8c259349723231c31bc671 | 613 | py | Python | lyrics.py | JamesK2754/COVIDBot | b4ffaa21873baa1f0c5dfd5b4d5ebb30bfd8d1a4 | [
"MIT"
] | null | null | null | lyrics.py | JamesK2754/COVIDBot | b4ffaa21873baa1f0c5dfd5b4d5ebb30bfd8d1a4 | [
"MIT"
] | null | null | null | lyrics.py | JamesK2754/COVIDBot | b4ffaa21873baa1f0c5dfd5b4d5ebb30bfd8d1a4 | [
"MIT"
] | null | null | null | import lyricsgenius
geniustoken = "Akf1AHXpbqaKHSQ06hesk8q1urZkHWJ334bzLr1SwZ1BBPSMGUm3NcbcbDR8ye7Z"
genius = lyricsgenius.Genius(geniustoken)
songname = input("")
def lysearch(songname):
    """Look up song lyrics on Genius.

    Args:
        songname: Either "<title>" or "<title>/<artist>".

    Returns:
        The lyrics text of the best match.

    Raises:
        LookupError: If Genius returns no match for the query.
    """
    import lyricsgenius
    geniustoken = "Akf1AHXpbqaKHSQ06hesk8q1urZkHWJ334bzLr1SwZ1BBPSMGUm3NcbcbDR8ye7Z"
    genius = lyricsgenius.Genius(geniustoken)
    parts = songname.split("/")
    if len(parts) == 1:
        song = genius.search_song(parts[0])
    else:
        song = genius.search_song(parts[0], parts[1])
    if song is None:
        # search_song returns None on a miss; fail with a clear message
        # instead of an AttributeError on song.lyrics.
        raise LookupError(f"No lyrics found for {songname!r}")
    return song.lyrics
#print(song.lyrics) | 36.058824 | 85 | 0.722675 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.254486 |
63bef9d21b68c3b40a658cb21bf53511e46c9e39 | 987 | py | Python | network.py | wildarch/aiai | 765fb67b42359f8fa0b530d5baf13714de069e90 | [
"MIT"
] | 1 | 2020-12-23T12:47:41.000Z | 2020-12-23T12:47:41.000Z | network.py | wildarch/aiai | 765fb67b42359f8fa0b530d5baf13714de069e90 | [
"MIT"
] | null | null | null | network.py | wildarch/aiai | 765fb67b42359f8fa0b530d5baf13714de069e90 | [
"MIT"
] | null | null | null | import numpy as np
import numpy.random as rand
from functools import reduce
class Network:
def __init__(self, layer_sizes):
# layer_sizes: list of numbers representing number of neurons per layer
# Create a numpy array of biases for each layer except the (first) input layer
self.biases = [rand.randn(l, 1) for l in layer_sizes[1:]]
# The weights are an array of matrices. 'Between' each two layers is one matrix.
# Every row contains a set of weights for each node
self.weights = [rand.randn(y, x) for x, y in zip(layer_sizes[:-1], layer_sizes[1:])]
def feed_forward(self, input):
# Perform a left fold
return reduce(lambda input, b_w: np.dot(b_w[1], input)+b_w[0], zip(self.biases, self.weights), input)
def sigmoid(z):
    """Logistic sigmoid 1 / (1 + e^(-z)), applied element-wise."""
    denominator = 1.0 + np.exp(-z)
    return 1.0 / denominator
def sigmoid_deriv(z):
    """First-order derivative of the sigmoid: s(z) * (1 - s(z)).

    Computes the sigmoid once (the previous version evaluated it twice).
    """
    s = 1.0 / (1.0 + np.exp(-z))
    return s * (1.0 - s)
| 28.2 | 109 | 0.663627 | 704 | 0.713273 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.375887 |
63c00642ff2e4d391cdfd97b2502db83f3e78004 | 276 | py | Python | al_helper/__init__.py | Taehun/al_helper | 8e304a69359e3807564bb15954df2994e0bb8897 | [
"Apache-2.0"
] | null | null | null | al_helper/__init__.py | Taehun/al_helper | 8e304a69359e3807564bb15954df2994e0bb8897 | [
"Apache-2.0"
] | null | null | null | al_helper/__init__.py | Taehun/al_helper | 8e304a69359e3807564bb15954df2994e0bb8897 | [
"Apache-2.0"
] | null | null | null | """Let's score the unlabeled data for the active learning"""
from al_helper.apis import build
from al_helper.helpers import ALHelper, ALHelperFactory, ALHelperObjectDetection
__version__ = "0.1.0"
__all__ = ["build", "ALHelper", "ALHelperFactory", "ALHelperObjectDetection"]
| 39.428571 | 80 | 0.786232 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.456522 |
63c1e56c492a20f0ed2af22f56c19c8afeb33a3d | 568 | py | Python | flocka/extensions.py | sleekslush/flocka | 3d1c0ae9bf82b7b8afb03494ee6dd8488157fe68 | [
"BSD-2-Clause"
] | 1 | 2018-10-09T14:09:12.000Z | 2018-10-09T14:09:12.000Z | flocka/extensions.py | sleekslush/flocka | 3d1c0ae9bf82b7b8afb03494ee6dd8488157fe68 | [
"BSD-2-Clause"
] | 11 | 2017-03-22T15:26:05.000Z | 2017-06-01T20:17:52.000Z | flocka/extensions.py | sleekslush/flocka | 3d1c0ae9bf82b7b8afb03494ee6dd8488157fe68 | [
"BSD-2-Clause"
] | null | null | null | from flask_cache import Cache
from flask_debugtoolbar import DebugToolbarExtension
from flask_login import LoginManager
from flask_assets import Environment
from flask_migrate import Migrate
from flocka.models import User
# Setup flask cache
cache = Cache()
# Init flask assets
assets_env = Environment()
# Debug Toolbar
debug_toolbar = DebugToolbarExtension()
# Alembic
migrate = Migrate()
# Flask Login
login_manager = LoginManager()
login_manager.login_view = "main.login"
@login_manager.user_loader
def load_user(userid):
    """Flask-Login callback: load a User by the id stored in the session."""
    return User.query.get(userid)
| 19.586207 | 52 | 0.806338 | 0 | 0 | 0 | 0 | 83 | 0.146127 | 0 | 0 | 87 | 0.153169 |
63c1f522219bcc2dba5b4f17eb780f21296ad3d6 | 90 | py | Python | django_rest_auth_embedded/tests/__init__.py | Volkova-Natalia/django_rest_auth_embedded | 43fe1d23f59332a7794365348989599cde44af6e | [
"MIT"
] | null | null | null | django_rest_auth_embedded/tests/__init__.py | Volkova-Natalia/django_rest_auth_embedded | 43fe1d23f59332a7794365348989599cde44af6e | [
"MIT"
] | 1 | 2021-02-26T16:56:31.000Z | 2021-03-24T09:47:43.000Z | django_rest_auth_email_confirm_reset/tests/__init__.py | Volkova-Natalia/django_rest_auth_email_confirm_reset | 781e63fd97606e48d69acf84fc6bb011e47b10ca | [
"MIT"
] | null | null | null | from .models import *
from .urls import *
from .views import *
from .integration import *
| 18 | 26 | 0.733333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
63c2e90768ca94858d6102fd8adcdc5f1544bdda | 137 | py | Python | pycwr/__init__.py | 1271756664/study | 8013dd6c597618949c5fcbf86e38502525a8136d | [
"MIT"
] | 144 | 2019-11-27T14:36:41.000Z | 2022-02-23T08:21:17.000Z | pycwr/__init__.py | 1271756664/study | 8013dd6c597618949c5fcbf86e38502525a8136d | [
"MIT"
] | 32 | 2019-11-29T10:11:53.000Z | 2022-03-14T07:46:44.000Z | pycwr/__init__.py | 1271756664/study | 8013dd6c597618949c5fcbf86e38502525a8136d | [
"MIT"
] | 57 | 2019-11-27T12:51:44.000Z | 2022-01-29T14:50:05.000Z | from . import configure, core, draw, io, interp, retrieve, qc
__all__ = ["configure", "core", "draw", "io", "interp", "qc", "retrieve"]
| 34.25 | 73 | 0.635036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.357664 |
63c55b4d12d1d8ec15b4829bd9a5a98bed33f3a3 | 979 | py | Python | app/main.py | aturX/python-swagger-docker | c05167c73205e80bd72479f215a2703565059e1c | [
"MIT"
] | null | null | null | app/main.py | aturX/python-swagger-docker | c05167c73205e80bd72479f215a2703565059e1c | [
"MIT"
] | null | null | null | app/main.py | aturX/python-swagger-docker | c05167c73205e80bd72479f215a2703565059e1c | [
"MIT"
] | 1 | 2020-01-08T13:51:24.000Z | 2020-01-08T13:51:24.000Z | import sys
sys.path.append('../') # 新加入的
sys.path.append('.') # 新加入的
from flasgger import Swagger
from flask import Flask
from v1.sum_ab_controller import demo_sum
app = Flask(__name__)
# API 文档的配置
template = {
"swagger": "2.0",
"info": {
"title": "XXX 在线API",
"description": "在线API 调用测试",
"contact": {
"responsibleOrganization": "AturX",
"responsibleDeveloper": "AturX",
"email": "pywizard6261@gmail.com",
"url": "www.me.com",
},
"termsOfService": "http://me.com/terms",
"version": "0.0.1"
},
"host": "localhost:5000", # overrides localhost:5000
"basePath": "/", # base bash for blueprint registration
"schemes": [
"http",
"https"
],
"operationId": "getmyData"
}
Swagger(app, template=template)
# 注册蓝图,并指定其对应的前缀(url_prefix)
app.register_blueprint(demo_sum, url_prefix="/sumAB")
if __name__ == '__main__':
# 访问API 接口地址 : http://localhost:5000/apidocs/
app.run(host='0.0.0.0', port='5000') | 22.767442 | 58 | 0.631256 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 605 | 0.56701 |
63c5f609080ffa98734dc87a441cdb7f22753616 | 2,169 | py | Python | tests/test_primary_beams.py | Song655/sdp-algorithm-reference | fc7c0da9461d5a1606ebb30ed913a44cdcd9b112 | [
"Apache-2.0"
] | 1 | 2019-10-18T13:11:01.000Z | 2019-10-18T13:11:01.000Z | tests/test_primary_beams.py | Song655/sdp-algorithm-reference | fc7c0da9461d5a1606ebb30ed913a44cdcd9b112 | [
"Apache-2.0"
] | 1 | 2019-01-28T23:07:32.000Z | 2019-01-28T23:07:32.000Z | tests/test_primary_beams.py | Song655/sdp-algorithm-reference | fc7c0da9461d5a1606ebb30ed913a44cdcd9b112 | [
"Apache-2.0"
] | 5 | 2018-03-27T03:30:34.000Z | 2019-10-18T13:05:37.000Z | """Unit tests for testing support
"""
import logging
import os
import unittest
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
from arl.data.polarisation import PolarisationFrame
from arl.image.operations import export_image_to_fits
from arl.imaging.base import create_image_from_visibility
from arl.util.primary_beams import create_pb_vla
from arl.util.testing_support import create_named_configuration
from arl.visibility.base import create_visibility
log = logging.getLogger(__name__)
class TestPrimaryBeams(unittest.TestCase):
    """Unit tests for primary beam creation (create_pb_vla)."""

    def setUp(self):
        # Output artifacts (e.g. FITS exports) are written here.
        self.dir = './test_results'
        os.makedirs(self.dir, exist_ok=True)
        self.frequency = numpy.linspace(1e8, 1.5e8, 3)
        self.channel_bandwidth = numpy.array([2.5e7, 2.5e7, 2.5e7])
        self.flux = numpy.array([[100.0], [100.0], [100.0]])
        self.phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
        self.config = create_named_configuration('LOWBD2-CORE')
        self.times = numpy.linspace(-300.0, 300.0, 3) * numpy.pi / 43200.0
        # Sanity-check that the named configuration is internally consistent.
        nants = self.config.xyz.shape[0]
        assert nants > 1
        assert len(self.config.names) == nants
        assert len(self.config.mount) == nants

    def createVis(self, config, dec=-35.0, rmax=None):
        """Helper: create a visibility set for the named configuration."""
        self.config = create_named_configuration(config, rmax=rmax)
        self.phasecentre = SkyCoord(ra=+15 * u.deg, dec=dec * u.deg, frame='icrs', equinox='J2000')
        self.vis = create_visibility(self.config, self.times, self.frequency,
                                     channel_bandwidth=self.channel_bandwidth,
                                     phasecentre=self.phasecentre, weight=1.0,
                                     polarisation_frame=PolarisationFrame('stokesI'))

    def test_create_primary_beams_vla(self):
        """The generated VLA primary beam image must contain positive values."""
        self.createVis(config='LOWBD2', rmax=1000.0)
        model = create_image_from_visibility(self.vis, cellsize=0.00001, override_cellsize=True)
        beam = create_pb_vla(model)
        assert numpy.max(beam.data) > 0.0
        export_image_to_fits(beam, "%s/primary_beam_vla.fits" % self.dir)
63c70a3e82f535c37cce5aff704d8df99b4e6ede | 3,676 | py | Python | trees/tree_to_pairwisedistance.py | johned0/EdwardsLab | ae0d8b51a579cd009b414d11224b4110ba13af66 | [
"MIT"
] | 30 | 2015-01-25T16:22:51.000Z | 2022-01-20T15:56:47.000Z | trees/tree_to_pairwisedistance.py | johned0/EdwardsLab | ae0d8b51a579cd009b414d11224b4110ba13af66 | [
"MIT"
] | 2 | 2020-04-13T15:00:37.000Z | 2020-09-23T12:35:59.000Z | trees/tree_to_pairwisedistance.py | johned0/EdwardsLab | ae0d8b51a579cd009b414d11224b4110ba13af66 | [
"MIT"
] | 24 | 2015-04-17T00:52:05.000Z | 2021-11-26T17:50:01.000Z | """
Start with a tree file, use ete3 to create a pairwise distance for all nodes. Basically this is the
distance matrix but as tuples.
if we have a tree like
----A
_____________|y
| |
| ----B
________|z
| ----C
| |
|____________|x -----D
| |
|______|w
|
|
-----E
Where w,x,y,z are internal nodes.
d(A,B) = d(y,A) + d(y,B)
and
d(A, E) = d(z,A) + d(z, E) = {d(z,y) + d(y,A)} + {d(z,x) + d(x,w) + d(w,E)}
We use an idea inspired by the ete3 team: https://gist.github.com/jhcepas/279f9009f46bf675e3a890c19191158b :
For each node find its path to the root.
e.g.
A -> A, y, z
E -> E, w, x,z
and make these orderless sets. Then we XOR the two sets to only find the elements
that are in one or other sets but not both. In this case A, E, y, x, w.
The distance between the two nodes is the sum of the distances from each of those nodes
to the parent
One more optimization: since the distances are symmetric, and distance to itself is zero
we user itertools.combinations rather than itertools.permutations. This cuts our computes from theta(n^2)
1/2n^2 - n (= O(n^2), which is still not great, but in reality speeds things up for large trees).
"""
import os
import sys
import argparse
from itertools import combinations
from ete3 import Tree
def make_dists(treefile, printone, verbose):
"""
Create pairwise distances from a tree file
:param treefile: the tree file to parse
:param printone: if true we only print one copy of the pair (ie. A -> B). If false we print A->B and B->A
:param verbose: make some additional output
:return:
"""
tree = Tree(treefile)
leaves = tree.get_leaves()
paths = {x:set() for x in leaves}
# get the paths going up the tree
# we get all the nodes up to the last one and store them in a set
if verbose:
sys.stderr.write("Precalculating distances\n")
for n in leaves:
if n.is_root():
continue
movingnode = n
while not movingnode.is_root():
paths[n].add(movingnode)
movingnode = movingnode.up
# now we want to get all pairs of nodes using itertools combinations. We need AB AC etc but don't need BA CA
leaf_distances = {x.name:{} for x in leaves}
if verbose:
sys.stderr.write("Iterating over the leaves\n")
for (leaf1, leaf2) in combinations(leaves, 2):
# figure out the unique nodes in the path
uniquenodes = paths[leaf1] ^ paths[leaf2]
distance = sum(x.dist for x in uniquenodes)
if printone:
if leaf1.name < leaf2.name:
print("{}\t{}\t{}".format(leaf1.name, leaf2.name, distance))
else:
print("{}\t{}\t{}".format(leaf2.name, leaf1.name, distance))
else:
print("{}\t{}\t{}".format(leaf1.name, leaf2.name, distance))
print("{}\t{}\t{}".format(leaf2.name, leaf1.name, distance))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert a tree into a distance matrix')
parser.add_argument('-t', help='Tree file', required=True)
parser.add_argument('-p', help='Print one direction (A->B). Default is to print A->B and B->A', action='store_true')
parser.add_argument('-v', help='Verbose output. (Mostly progress)', action='store_true')
args = parser.parse_args()
make_dists(args.t, args.p, args.v) | 34.35514 | 120 | 0.590588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,294 | 0.624048 |
63c7b536e598facda277d873456c65962337d344 | 508 | py | Python | src/hitachi2020_b.py | 06keito/study-atcoder | c859e542079b550d19fa5e5e632e982a0dbb9578 | [
"MIT"
] | 1 | 2021-08-19T07:21:47.000Z | 2021-08-19T07:21:47.000Z | src/hitachi2020_b.py | 06keito/main-repository | c859e542079b550d19fa5e5e632e982a0dbb9578 | [
"MIT"
] | null | null | null | src/hitachi2020_b.py | 06keito/main-repository | c859e542079b550d19fa5e5e632e982a0dbb9578 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def main():
A,B,M = map(int,input().split())
A_prise = list(map(int,input().split()))
B_prise = list(map(int,input().split()))
Most_low_prise = min(A_prise)+min(B_prise)
for i in range(M):
x,y,c = map(int,input().split())
Post_coupon_orientation_prise = A_prise[x-1]+B_prise[y-1]-c
Most_low_prise = min(Most_low_prise,Post_coupon_orientation_prise)
print(Most_low_prise)
if __name__ == '__main__':
main() | 31.75 | 74 | 0.627953 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.106299 |
63c88a948245c1382a743f6e1329878390cf91ac | 51,310 | py | Python | gazoo_device/tests/unit_tests/utility_tests/adb_utils_test.py | dedsec-9/gazoo-device | 5ed2867c258da80e53b6aae07ec7a65efe473a28 | [
"Apache-2.0"
] | null | null | null | gazoo_device/tests/unit_tests/utility_tests/adb_utils_test.py | dedsec-9/gazoo-device | 5ed2867c258da80e53b6aae07ec7a65efe473a28 | [
"Apache-2.0"
] | null | null | null | gazoo_device/tests/unit_tests/utility_tests/adb_utils_test.py | dedsec-9/gazoo-device | 5ed2867c258da80e53b6aae07ec7a65efe473a28 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This test script performs unit tests on functions in the adb_utils module."""
import grp
import json
import os
import subprocess
from unittest import mock
from gazoo_device import config
from gazoo_device import errors
from gazoo_device.tests.unit_tests.utils import unit_test_case
from gazoo_device.utility import adb_utils
from gazoo_device.utility import host_utils
ADB_CMD_PATH = "/usr/bin/adb"
FAKE_ADB_DEVICES_OUTPUT = ("List of devices attached\n"
"04576e89\tdevice\n"
"04576ee5\tsideload\n"
"04576eaz\toffline\n"
"123.45.67.89:5555\tdevice\n"
"123.45.67.90:5555\tsideload\n"
"123.45.67.91:5555\toffline\n\n")
ADB_DEVICES = ["04576e89", "123.45.67.89"]
SIDELOAD_DEVICES = ["04576ee5", "123.45.67.90:5555"]
FAKE_ADB_REBOOT = ""
FAKE_ADB_ROOT = ""
FAKE_SHELL = "abc\n123\n"
FASTBOOT_CMD_PATH = "/usr/bin/fastboot"
FASTBOOT_CMD = os.path.basename(FASTBOOT_CMD_PATH)
FASTBOOT_DEVICES = ["04576e89", "06011HFDD0165R", "04576ee5"]
FAKE_FASTBOOT = ("04576e89 fastboot\n"
"06011HFDD0165R Android Fastboot\n"
"04576ee5 fastboot\n\n")
FAKE_FASTBOOT_REBOOT = ("Rebooting...\n\n"
"Finished. Total time: 0.157s\n")
DEVICE_NAME = "somedevice"
DEVICE_ADB_SERIAL = "aabbccdd"
DEVICE_FASTBOOT_SERIAL = "aabbccdd"
TEST_GROUP_ENTRY = ("plugdev", None, 46, None)
TEST_GOOD_GROUP_LIST = [42, 46]
TEST_USER_UID = 1000
TEST_USER_NAME = "test_user"
class AdbUtilsTests(unit_test_case.UnitTestCase):
"""ADB utility tests."""
@mock.patch.object(host_utils, "has_command", return_value=False)
def test_010_adb_utils_get_fastboot_path_raises_error(self,
                                                      mock_get_command_path):
    """Verify get_fastboot_path raises error if get_command_path fails."""
    # With no fastboot binary visible on the host, lookup must fail loudly.
    with self.assertRaises(RuntimeError):
        adb_utils.get_fastboot_path()
    mock_get_command_path.assert_called()
@mock.patch.object(
host_utils, "get_command_path", return_value=FASTBOOT_CMD_PATH)
def test_011_adb_utils_get_fastboot_path_calls_get_command_path(
self, mock_get_command_path):
"""Verify get_fastboot_path calls get_command_path."""
self.assertEqual(FASTBOOT_CMD_PATH, adb_utils.get_fastboot_path())
mock_get_command_path.assert_called()
@mock.patch.object(
subprocess,
"check_output",
return_value=FAKE_FASTBOOT.encode("utf-8", errors="replace"))
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_020_adb_utils_get_fastboot_devices_calls_get_fastboot_path(
self, mock_get_fastboot_path, mock_subprocess):
"""Verify get_fastboot_devices calls get_fastboot_path."""
self.assertEqual(FASTBOOT_DEVICES, adb_utils.get_fastboot_devices())
mock_get_fastboot_path.assert_called()
mock_subprocess.assert_called()
@mock.patch.object(host_utils, "has_command", return_value=False)
def test_021_adb_utils_get_fastboot_devices_bad_fastboot_path(
self, mock_has_command):
"""Verify get_fastboot_devices skips get_fastboot_path."""
devices = adb_utils.get_fastboot_devices(fastboot_path="bogus/path")
self.assertEqual(devices, [])
mock_has_command.assert_called()
@mock.patch.object(
subprocess,
"check_output",
side_effect=subprocess.CalledProcessError(-1, ["fastboot", "devices"]))
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_022_adb_utils_get_fastboot_devices_subprocess_errors(
self, mock_get_fastboot_path, mock_subprocess):
"""Verify get_fastboot_devices handles subprocess errors internally."""
self.assertEqual([], adb_utils.get_fastboot_devices())
mock_get_fastboot_path.assert_called()
mock_subprocess.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
def test_023_adb_utils_get_fastboot_path_uses_correct_path(self, mock_exists):
"""Verify get_fastboot_devices skips get_fastboot_path."""
path = adb_utils.get_fastboot_path(fastboot_path="genuine/path")
self.assertEqual(path, "genuine/path")
@mock.patch.object(
adb_utils, "get_fastboot_devices", return_value=FASTBOOT_DEVICES)
def test_030_adb_utils_is_fastboot_mode_true(self, mock_get_fastboot_devices):
"""Verify is_fastboot_mode returns True."""
adb_serial = "04576e89"
self.assertTrue(adb_utils.is_fastboot_mode(adb_serial))
mock_get_fastboot_devices.assert_called()
@mock.patch.object(
adb_utils, "get_fastboot_devices", return_value=FASTBOOT_DEVICES)
def test_031_adb_utils_is_fastboot_mode_false(self,
mock_get_fastboot_devices):
"""Verify is_fastboot_mode returns False."""
adb_serial = "bogus"
self.assertFalse(adb_utils.is_fastboot_mode(adb_serial))
mock_get_fastboot_devices.assert_called()
@mock.patch.object(
adb_utils, "get_sideload_devices", return_value=SIDELOAD_DEVICES)
def test_032_adb_utils_is_sideload_mode_true(self, mock_get_sideload_devices):
"""Verify is_sideload_mode on True."""
adb_serial = SIDELOAD_DEVICES[0]
self.assertTrue(adb_utils.is_sideload_mode(adb_serial))
mock_get_sideload_devices.assert_called_once()
@mock.patch.object(
adb_utils, "get_sideload_devices", return_value=SIDELOAD_DEVICES)
def test_033_adb_utils_is_sideload_mode_false(self,
mock_get_sideload_devices):
"""Verify is_sideload_mode on False."""
adb_serial = "bogus"
self.assertFalse(adb_utils.is_sideload_mode(adb_serial))
mock_get_sideload_devices.assert_called_once()
@mock.patch.object(
subprocess,
"check_output",
return_value=FASTBOOT_CMD_PATH.encode("utf-8", errors="replace"))
@mock.patch.object(grp, "getgrnam", return_value=TEST_GROUP_ENTRY)
@mock.patch.object(os, "getgroups", return_value=TEST_GOOD_GROUP_LIST)
@mock.patch.object(os, "getuid", return_value=TEST_USER_UID)
@mock.patch.object(os, "getlogin", return_value=TEST_USER_NAME)
def test_040_adb_utils_verify_user_has_fastboot(self, mock_getlogin,
mock_getuid, mock_getgroups,
mock_getgrnam,
mock_check_output):
"""Verify that verify_usr_has_fastboot works correctly."""
try:
adb_utils.verify_user_has_fastboot(DEVICE_NAME)
mock_check_output.assert_called()
except subprocess.CalledProcessError as err:
self.fail("verify_user_has_fastboot() raised error: {!r}".format(err))
@mock.patch.object(
subprocess,
"check_output",
side_effect=subprocess.CalledProcessError(1, ["which", FASTBOOT_CMD]))
def test_041_adb_utils_verify_user_has_fastboot_no_fastboot(
self, mock_check_output):
"""Verify that verify_user_has_fastboot raises if fastboot not present."""
with self.assertRaises(errors.DeviceError):
adb_utils.verify_user_has_fastboot(DEVICE_NAME)
mock_check_output.assert_called()
@mock.patch.object(host_utils, "get_command_path", return_value=ADB_CMD_PATH)
def test_050_adb_utils_get_adb_path_no_config_file(self,
mock_get_command_path):
"""Verify get_adb_path handles open errors internally."""
config_file = os.path.join(self.artifacts_directory,
self._testMethodName + ".json")
with mock.patch.dict(config.__dict__,
{"DEFAULT_GDM_CONFIG_FILE": config_file}):
self.assertEqual(ADB_CMD_PATH, adb_utils.get_adb_path())
mock_get_command_path.assert_called()
@mock.patch.object(host_utils, "get_command_path", return_value=ADB_CMD_PATH)
@mock.patch.object(json, "load", side_effect=ValueError)
def test_051_adb_utils_get_adb_path_bad_config_data(self, mock_json_load,
mock_get_command_path):
"""Verify get_adb_path handles json.load errors internally."""
config_file = os.path.join(self.artifacts_directory,
self._testMethodName + ".json")
with open(config_file, "w") as gdm_config:
gdm_config.write("{}")
with mock.patch.dict(config.__dict__,
{"DEFAULT_GDM_CONFIG_FILE": config_file}):
self.assertEqual(ADB_CMD_PATH, adb_utils.get_adb_path())
mock_json_load.assert_called()
mock_get_command_path.assert_called()
@mock.patch.object(host_utils, "get_command_path", return_value=ADB_CMD_PATH)
def test_052_adb_utils_get_adb_path_no_adb_path_in_config(
self, mock_get_command_path):
"""Verify get_adb_path handles missing adb_path key errors internally."""
config_file = os.path.join(self.artifacts_directory,
self._testMethodName + ".json")
with open(config_file, "w") as gdm_config:
gdm_config.write("{}")
with mock.patch.dict(config.__dict__,
{"DEFAULT_GDM_CONFIG_FILE": config_file}):
self.assertEqual(ADB_CMD_PATH, adb_utils.get_adb_path())
mock_get_command_path.assert_called()
@mock.patch.object(host_utils, "has_command", return_value=False)
def test_053_adb_utils_get_adb_path_bad_adb_path_raises_error(
self, mock_has_command):
"""Verify get_adb_path bad adb_path raises error."""
config_file = os.path.join(self.artifacts_directory,
self._testMethodName + ".json")
with open(config_file, "w") as gdm_config:
gdm_config.write("{\"")
gdm_config.write(config.ADB_BIN_PATH_CONFIG)
gdm_config.write("\":")
gdm_config.write("\"/some/bad/path\"}")
with mock.patch.dict(config.__dict__,
{"DEFAULT_GDM_CONFIG_FILE": config_file}):
with self.assertRaises(RuntimeError):
adb_utils.get_adb_path()
@mock.patch.object(os.path, "exists", return_value=True)
def test_054_adb_utils_get_fadb_path_uses_correct_path(self, mock_exists):
"""Verify get_adb_path defaults to path passed in."""
path = adb_utils.get_adb_path(adb_path="genuine/path")
self.assertEqual(path, "genuine/path")
@mock.patch.object(
adb_utils, "_adb_command", return_value=FAKE_ADB_DEVICES_OUTPUT)
def test_060_adb_utils_get_adb_devices_calls_get_adb_path(
self, mock_adb_command):
"""Verify get_adb_devices calls _adb_command."""
self.assertEqual(ADB_DEVICES, adb_utils.get_adb_devices())
mock_adb_command.assert_called()
@mock.patch.object(host_utils, "has_command", return_value=False)
@mock.patch.object(os.path, "exists", return_value=False)
def test_061_adb_utils_get_adb_devices_returns_list_when_no_adb(
self, mock_exists, mock_has_command):
"""Verify get_adb_devices calls _adb_command."""
self.assertEqual([], adb_utils.get_adb_devices())
@mock.patch.object(
adb_utils, "_adb_command", return_value=FAKE_ADB_DEVICES_OUTPUT)
def test_062_adb_utils_get_sideload_devices_on_success(
self, mock_adb_command):
"""Verify get_sideload_devices returns devices on success."""
self.assertEqual(SIDELOAD_DEVICES, adb_utils.get_sideload_devices())
mock_adb_command.assert_called_once_with("devices", adb_path=None)
@mock.patch.object(adb_utils, "_adb_command", side_effect=RuntimeError())
def test_063_adb_utils_get_sideload_devices_on_failure(
self, mock_adb_command):
"""Verify get_sideload_devices returns empty list on failure."""
self.assertEqual([], adb_utils.get_sideload_devices())
mock_adb_command.assert_called_once_with("devices", adb_path=None)
@mock.patch.object(adb_utils, "get_adb_devices", return_value=ADB_DEVICES)
def test_070_adb_utils_is_adb_mode_returns_true(self, mock_get_adb_devices):
"""Verify is_adb_mode calls get_adb_devices."""
adb_serial = "04576e89"
self.assertTrue(adb_utils.is_adb_mode(adb_serial))
mock_get_adb_devices.assert_called()
@mock.patch.object(adb_utils, "get_adb_devices", return_value=ADB_DEVICES)
def test_071_adb_utils_is_adb_mode_returns_false(self, mock_get_adb_devices):
"""Verify is_adb_mode calls get_adb_devices."""
adb_serial = "bogus"
self.assertFalse(adb_utils.is_adb_mode(adb_serial))
mock_get_adb_devices.assert_called()
@mock.patch.object(adb_utils, "is_fastboot_mode", return_value=False)
@mock.patch.object(adb_utils, "is_adb_mode", return_value=True)
def test_080_adb_utils_is_device_online_yes_no(self, mock_is_adb_mode,
mock_is_fastboot_mode):
"""Verify is_device_online calls is_adb_mode and not is_fastboot_mode."""
self.assertTrue(adb_utils.is_device_online(DEVICE_ADB_SERIAL))
mock_is_adb_mode.assert_called()
mock_is_fastboot_mode.assert_not_called()
@mock.patch.object(adb_utils, "is_fastboot_mode", return_value=True)
@mock.patch.object(adb_utils, "is_adb_mode", return_value=False)
def test_081_adb_utils_is_device_online_no_yes(self, mock_is_adb_mode,
mock_is_fastboot_mode):
"""Verify is_device_online calls is_adb_mode and is_fastboot_mode."""
self.assertTrue(adb_utils.is_device_online(DEVICE_ADB_SERIAL))
mock_is_adb_mode.assert_called()
mock_is_fastboot_mode.assert_called()
@mock.patch.object(adb_utils, "is_fastboot_mode", return_value=False)
@mock.patch.object(adb_utils, "is_adb_mode", return_value=False)
def test_082_adb_utils_is_device_online_no_no(self, mock_is_adb_mode,
mock_is_fastboot_mode):
"""Verify is_device_online calls is_adb_mode and is_fastboot_mode."""
self.assertFalse(adb_utils.is_device_online(DEVICE_ADB_SERIAL))
mock_is_adb_mode.assert_called()
mock_is_fastboot_mode.assert_called()
@mock.patch.object(adb_utils, "is_fastboot_mode", return_value=True)
@mock.patch.object(adb_utils, "is_adb_mode", return_value=True)
def test_083_adb_utils_is_device_online_yes_yes(self, mock_is_adb_mode,
mock_is_fastboot_mode):
"""Verify is_device_online calls is_adb_mode and not is_fastboot_mode."""
self.assertTrue(adb_utils.is_device_online(DEVICE_ADB_SERIAL))
mock_is_adb_mode.assert_called()
mock_is_fastboot_mode.assert_not_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_100_adb_utils_adb_command_without_adb_serial(self,
                                                      mock_get_adb_path):
    """Verify _adb_command without adb_serial."""
    command = "fake_command"
    command_output = "fake output\n"
    # Fake a successful subprocess whose stdout bytes decode to the
    # expected output string.
    mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
    mock_popen.communicate.return_value = (command_output.encode(
        "utf-8", errors="replace"), None)
    with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
        output = adb_utils._adb_command(command)
    self.assertEqual(command_output, output)
    mock_get_adb_path.assert_called()
  @mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
  def test_101_adb_utils_adb_command_with_string_command(
      self, mock_get_adb_path):
    """Verify _adb_command with string command."""
    command = "fake_command"
    command_output = "fake output\n"
    # Fake a clean adb subprocess exit with canned stdout bytes.
    mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
    mock_popen.communicate.return_value = (command_output.encode(
        "utf-8", errors="replace"), None)
    with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
      output = adb_utils._adb_command(command, DEVICE_ADB_SERIAL)
    # _adb_command is expected to decode stdout back to the original text.
    self.assertEqual(command_output, output)
    mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_102_adb_utils_adb_command_with_string_command(
self, mock_get_adb_path):
"""Verify _adb_command with unicode command."""
command = u"fake_command"
command_output = "fake output\n"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command, DEVICE_ADB_SERIAL)
self.assertEqual(command_output, output)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_103_adb_utils_adb_command_with_list_command(self, mock_get_adb_path):
"""Verify _adb_command with command list."""
command = ["fake_command", "arg1"]
command_output = "fake output\n"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command, DEVICE_ADB_SERIAL)
self.assertEqual(command_output, output)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_104_adb_utils_adb_command_with_tuple_command(self,
mock_get_adb_path):
"""Verify _adb_command with tuple list."""
command = ("fake_command", "arg1")
command_output = "fake output\n"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command, DEVICE_ADB_SERIAL)
self.assertEqual(command_output, output)
mock_get_adb_path.assert_called()
  @mock.patch.object(os.path, "exists", return_value=False)
  @mock.patch.object(host_utils, "has_command", return_value=False)
  def test_105_adb_utils_adb_command_bad_adb_path(self, mock_has_command,
                                                  mock_os_path_exists):
    """Verify _adb_command skips get_adb_path and raises on a bad path."""
    # An explicit adb_path bypasses get_adb_path; since the path neither
    # exists nor resolves to a known command, a RuntimeError is expected.
    with self.assertRaises(RuntimeError):
      adb_utils._adb_command(
          "fake_command", DEVICE_ADB_SERIAL, adb_path="bogus/path")
    mock_os_path_exists.assert_called()
    mock_has_command.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_106_adb_utils_adb_command_include_return_code(
self, mock_get_adb_path):
"""Verify _adb_command include_return_code returns tuple."""
command = "fake_command"
command_output = "fake output\n"
command_return_code = 1
mock_popen = mock.MagicMock(
spec=subprocess.Popen, returncode=command_return_code)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output, return_code = adb_utils._adb_command(
command, DEVICE_ADB_SERIAL, include_return_code=True)
self.assertEqual(command_output, output)
self.assertEqual(command_return_code, return_code)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_107_adb_utils_adb_command_with_offline(self, mock_get_adb_path):
"""Verify _adb_command succeeds if output includes "offline"."""
command = "fake_command"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (
FAKE_ADB_DEVICES_OUTPUT.encode("utf-8"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command)
self.assertEqual(FAKE_ADB_DEVICES_OUTPUT, output)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "_adb_command", return_value="Success\n")
@mock.patch.object(os.path, "exists", return_value=True)
def test_119_adb_utils_install_package_on_device_success(
self, mock_path_exists, mock_adb_command):
"""Verify install_package_on_device on success."""
fake_package_path = "/tmp/xxx.apk"
adb_utils.install_package_on_device(
fake_package_path, adb_serial=DEVICE_ADB_SERIAL, adb_path=ADB_CMD_PATH)
mock_path_exists.assert_called_once_with(fake_package_path)
mock_adb_command.assert_called_once_with(("install", fake_package_path),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
@mock.patch.object(adb_utils, "_adb_command", return_value="Success\n")
@mock.patch.object(os.path, "exists", return_value=True)
def test_120_adb_utils_install_package_on_device_with_flags_success(
self, mock_path_exists, mock_adb_command):
"""Verify install_package_on_device with flags on success."""
fake_package_path = "/tmp/xxx.apk"
adb_utils.install_package_on_device(
fake_package_path,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH,
allow_downgrade=True,
allow_test_apk=True,
reinstall=True,
all_permissions=True)
mock_path_exists.assert_called_once_with(fake_package_path)
mock_adb_command.assert_called_once_with(
("install", "-d", "-g", "-r", "-t", fake_package_path),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
@mock.patch.object(adb_utils, "_adb_command")
@mock.patch.object(os.path, "exists")
def test_121_adb_utils_install_package_on_device_exception(
self, mock_path_exists, mock_adb_command):
"""Verify install_package_on_device raise exception."""
# Note:
# install_package_on_device() raises exception when:
# 1) package_path is not a file.
# 2) 'Success\n' is not found in command response.
fake_package_path = "/tmp/xxx.apk"
# 1) package path not a file
mock_path_exists.return_value = False
with self.assertRaises(ValueError):
adb_utils.install_package_on_device(
fake_package_path,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_path_exists.assert_called_with(fake_package_path)
# 2) 'Success\n' is not in command response
mock_path_exists.return_value = True
mock_adb_command.return_value = ""
with self.assertRaises(errors.DeviceError):
adb_utils.install_package_on_device(
fake_package_path,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_with(("install", fake_package_path),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
@mock.patch.object(adb_utils, "_adb_command", return_value="Success\n")
def test_122_adb_utils_uninstall_package_on_device_success(
self, mock_adb_command):
"""Verify uninstall_package_on_device on success."""
fake_package_name = "com.google.fakepackage"
adb_utils.uninstall_package_on_device(
fake_package_name, adb_serial=DEVICE_ADB_SERIAL, adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_once_with(("uninstall", fake_package_name),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
@mock.patch.object(adb_utils, "_adb_command", return_value="")
def test_123_adb_utils_uninstall_package_on_device_exception(
self, mock_adb_command):
"""Verify uninstall_package_on_device raise exception."""
fake_package_name = "com.google.fakepackage"
with self.assertRaises(errors.DeviceError):
adb_utils.uninstall_package_on_device(
fake_package_name,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_once_with(("uninstall", fake_package_name),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
  @mock.patch.object(adb_utils, "_adb_command", return_value=FAKE_SHELL)
  @mock.patch.object(os.path, "isfile", return_value=True)
  def test_124_adb_utils_sideload_package_on_success(self, mock_os_path_isfile,
                                                     mock_adb_command):
    """Verify sideload_package calls _adb_command."""
    package_path = "/tmp/abc"
    self.assertEqual(
        adb_utils.sideload_package(package_path, DEVICE_ADB_SERIAL), FAKE_SHELL)
    # The package path is validated exactly once before sideloading.
    mock_os_path_isfile.assert_called_once_with(package_path)
    mock_adb_command.assert_called_once_with(("sideload", package_path),
                                             adb_serial=DEVICE_ADB_SERIAL,
                                             adb_path=None)
  @mock.patch.object(adb_utils, "_adb_command", return_value=FAKE_SHELL)
  @mock.patch.object(os.path, "isfile", return_value=False)
  def test_125_adb_utils_sideload_package_on_exception(self,
                                                       mock_os_path_isfile,
                                                       mock_adb_command):
    """Verify sideload_package raises when package_path is invalid."""
    package_path = "/tmp/abc"
    with self.assertRaises(RuntimeError):
      adb_utils.sideload_package(package_path, DEVICE_ADB_SERIAL)
    mock_os_path_isfile.assert_called_once_with(package_path)
    # Validation failed, so no adb command should have been issued.
    mock_adb_command.assert_not_called()
@mock.patch.object(adb_utils, "_adb_command", return_value=FAKE_ADB_REBOOT)
def test_140_adb_utils_enter_fastboot_calls_get_adb_path(
self, mock_adb_command):
"""Verify enter_fastboot calls get_adb_path."""
self.assertEqual(FAKE_ADB_REBOOT,
adb_utils.enter_fastboot(DEVICE_ADB_SERIAL))
mock_adb_command.assert_called()
@mock.patch.object(adb_utils, "_adb_command", return_value=FAKE_ADB_REBOOT)
def test_141_adb_utils_enter_sideload(self, mock_adb_command):
"""Verify enter_sideload calls _adb_command."""
# Note:
# Verify both 1) sideload auto reboot and 2) no auto reboot.
# With auto_reboot: False
self.assertEqual(
FAKE_ADB_REBOOT,
adb_utils.enter_sideload(DEVICE_ADB_SERIAL, auto_reboot=False))
mock_adb_command.assert_called_with(("reboot", "sideload"),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=None)
# With auto_reboot: True
self.assertEqual(
FAKE_ADB_REBOOT,
adb_utils.enter_sideload(DEVICE_ADB_SERIAL, auto_reboot=True))
mock_adb_command.assert_called_with(("reboot", "sideload-auto-reboot"),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=None)
@mock.patch.object(
subprocess,
"check_output",
return_value=FAKE_FASTBOOT_REBOOT.encode("utf-8", errors="replace"))
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_150_adb_utils_exit_fastboot_calls_get_fastboot_path(
self, mock_get_fastboot_path, mock_os_path_exists, mock_subprocess):
"""Verify exit_fastboot calls get_fastboot_path."""
self.assertEqual(FAKE_FASTBOOT_REBOOT,
adb_utils.exit_fastboot(DEVICE_ADB_SERIAL))
mock_get_fastboot_path.assert_called()
mock_os_path_exists.assert_called()
mock_subprocess.assert_called()
@mock.patch.object(os.path, "exists", return_value=False)
@mock.patch.object(adb_utils, "get_fastboot_path")
def test_151_adb_utils_exit_fastboot_bad_fastboot_path(
self, mock_get_fastboot_path, mock_os_path_exists):
"""Verify exit_fastboot skips get_fastboot_path."""
with self.assertRaises(RuntimeError):
adb_utils.exit_fastboot(DEVICE_ADB_SERIAL, fastboot_path="bogus/path")
mock_get_fastboot_path.assert_not_called()
mock_os_path_exists.assert_called()
@mock.patch.object(
subprocess,
"check_output",
side_effect=subprocess.CalledProcessError(
-1, ["timeout", "10.0", "fastboot", "reboot"]))
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
@mock.patch.object(os.path, "exists", return_value=True)
def test_152_adb_utils_exit_fastboot_bad_request(self, mock_get_fastboot_path,
mock_os_path_exists,
mock_check_output):
"""Verify exit_fastboot returns None."""
result = adb_utils.exit_fastboot(DEVICE_ADB_SERIAL)
self.assertIsNone(result)
mock_get_fastboot_path.assert_called()
mock_os_path_exists.assert_called()
mock_check_output.assert_called()
@mock.patch.object(adb_utils, "_adb_command", return_value=FAKE_ADB_REBOOT)
def test_160_adb_utils_reboot_device_calls_get_adb_path(
self, mock_adb_command):
"""Verify reboot_device calls get_adb_path."""
self.assertEqual(FAKE_ADB_REBOOT,
adb_utils.reboot_device(DEVICE_ADB_SERIAL))
mock_adb_command.assert_called()
@mock.patch.object(adb_utils, "_adb_command", return_value=FAKE_ADB_ROOT)
def test_170_adb_utils_root_device_calls_get_adb_path(self, mock_adb_command):
"""Verify root_device calls get_adb_path."""
self.assertEqual(FAKE_ADB_ROOT, adb_utils.root_device(DEVICE_ADB_SERIAL))
mock_adb_command.assert_called()
@mock.patch.object(
adb_utils, "_adb_command", return_value=("pull output\n", 0))
def test_180_adb_utils_pull_from_device_with_single_file(
self, mock_adb_command):
"""Verify pull_file for a single source file."""
sources = "/some/device/path/to/file"
adb_utils.pull_from_device(DEVICE_ADB_SERIAL, sources)
mock_adb_command.assert_called()
@mock.patch.object(
adb_utils, "_adb_command", return_value=("pull output\n", 0))
def test_181_adb_utils_pull_from_device_with_multiple_files(
self, mock_adb_command):
"""Verify pull_from_device calls get_adb_path."""
sources = ["/some/device/path/to/file", "/some/device/path/to/other_file"]
adb_utils.pull_from_device(DEVICE_ADB_SERIAL, sources)
mock_adb_command.assert_called()
@mock.patch.object(
adb_utils, "_adb_command", return_value=("pull output\n", 1))
def test_182_adb_utils_pull_from_device_bad_returncode(
self, mock_adb_command):
"""Verify pull_from_device raises if ADB command fails."""
sources = "/some/device/path/to/file"
with self.assertRaises(RuntimeError):
adb_utils.pull_from_device(DEVICE_ADB_SERIAL, sources)
mock_adb_command.assert_called()
@mock.patch.object(adb_utils, "_adb_command")
@mock.patch.object(os.path, "exists", return_value=False)
def test_183_adb_utils_pull_from_device_bad_destination_path(
self, mock_os_path_exists, mock_adb_command):
"""Verify pull_from_device provided bad destination path."""
sources = "/some/device/path/to/file"
destination_path = "/bogus/path"
with self.assertRaises(ValueError):
adb_utils.pull_from_device(
DEVICE_ADB_SERIAL, sources, destination_path=destination_path)
mock_os_path_exists.assert_called()
mock_adb_command.assert_not_called()
@mock.patch.object(
adb_utils, "_adb_command", return_value=("push output\n", 0))
@mock.patch.object(os.path, "exists", return_value=True)
def test_190_adb_utils_push_to_device_with_single_file(
self, mock_os_path_exists, mock_adb_command):
"""Verify push_to_device sends a single file."""
sources = "/fake/local/path"
destination_path = "/fake/device/path"
adb_utils.push_to_device(DEVICE_ADB_SERIAL, sources, destination_path)
mock_os_path_exists.assert_called()
mock_adb_command.assert_called()
@mock.patch.object(
adb_utils, "_adb_command", return_value=("push output\n", 0))
@mock.patch.object(os.path, "exists", return_value=True)
def test_191_adb_utils_push_to_device_with_multiple_files(
self, mock_os_path_exists, mock_adb_command):
"""Verify push_to_device sends multiple files."""
sources = ["/fake/local/path/to/file1", "/fake/local/path/to/file2"]
destination_path = "/fake/device/path"
adb_utils.push_to_device(DEVICE_ADB_SERIAL, sources, destination_path)
mock_os_path_exists.assert_called()
mock_adb_command.assert_called()
@mock.patch.object(os.path, "exists", return_value=False)
def test_192_adb_utils_push_to_device_fails_single_file(
self, mock_os_path_exists):
"""Verify push_to_device fails single file path check."""
sources = "/bogus/local/file"
destination_path = "/fake/device/path"
with self.assertRaises(ValueError):
adb_utils.push_to_device(DEVICE_ADB_SERIAL, sources, destination_path)
mock_os_path_exists.assert_called()
@mock.patch.object(os.path, "exists", side_effect=[True, False])
def test_193_adb_utils_push_to_device_fails_multiple_files(
self, mock_os_path_exists):
"""Verify push_to_device fails multiple files path check."""
sources = ["/fake/local/path/to/file1", "/fake/local/path/to/file2"]
destination_path = "/fake/device/path"
with self.assertRaises(ValueError):
adb_utils.push_to_device(DEVICE_ADB_SERIAL, sources, destination_path)
mock_os_path_exists.assert_called()
@mock.patch.object(
adb_utils, "_adb_command", return_value=("push output\n", 1))
@mock.patch.object(os.path, "exists", return_value=True)
def test_194_adb_utils_push_to_device_bad_returncode(self,
mock_os_path_exists,
mock_adb_command):
"""Verify push_file subprocess.communicate returns non-zero returncode."""
sources = "/fake/local/path"
destination_path = "/fake/device/path"
with self.assertRaises(RuntimeError):
adb_utils.push_to_device(DEVICE_ADB_SERIAL, sources, destination_path)
mock_os_path_exists.assert_called()
mock_adb_command.assert_called()
@mock.patch.object(adb_utils, "_adb_command", return_value="fake\n")
def test_200_adb_shell(self, mock_adb_command):
"""Verifies shell works as expected."""
self.assertEqual("fake\n", adb_utils.shell("12345", 'echo "fake"'))
mock_adb_command.assert_called_once_with(
["shell", 'echo "fake"'], "12345",
adb_path=None, retries=mock.ANY, timeout=None,
include_return_code=False)
@mock.patch.object(adb_utils, "_adb_command", return_value=("fake\n", 0))
def test_201_adb_shell_include_return_code(self, mock_adb_command):
"""Verifies shell include return code will return output and code tuple."""
output, return_code = adb_utils.shell(
"12345", 'echo "fake"', include_return_code=True)
self.assertEqual("fake\n", output)
self.assertEqual(0, return_code)
mock_adb_command.assert_called_once_with(
["shell", 'echo "fake"'], "12345",
adb_path=None, retries=mock.ANY, timeout=None, include_return_code=True)
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value="/fake/path/to/fastboot")
@mock.patch.object(os.path, "exists", return_value=False)
def test_300_adb_utils_fastboot_command_without_fastboot_path(
self, mock_exists, mock_get_fastboot_path):
"""Verify get_fastboot_path called when fastboot_path is not given."""
with self.assertRaises(RuntimeError):
adb_utils._fastboot_command("fake command")
mock_get_fastboot_path.assert_called_once()
mock_exists.assert_called()
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value="/fake/path/to/fastboot")
@mock.patch.object(os.path, "exists", return_value=False)
def test_301_adb_utils_fastboot_command_with_bad_fastboot_path(
self, mock_exists, mock_get_fastboot_path):
"""Verify _fastboot_command raise error when given a bad fastboot_path."""
with self.assertRaises(RuntimeError):
adb_utils._fastboot_command(
"fake_command", fastboot_path="/fake/path/to/fastboot")
mock_get_fastboot_path.assert_not_called()
mock_exists.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
def test_302_adb_utils_fastboot_command_without_fastboot_serial(
self, mock_exists):
"""Verify _fastboot_command without fastboot_serial."""
fastboot_executable = "fastboot"
command = "fake_command"
command_output = "fake_command_output"
mock_proc = mock.MagicMock(spec=subprocess.Popen)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
output = adb_utils._fastboot_command(
command, fastboot_path=fastboot_executable)
self.assertEqual(output, command_output)
mock_exists.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_303_adb_utils_fastboot_command_with_string_command(
self, mock_get_fastboot_path, mock_exists):
"""Verify _fastboot_command with string command."""
command = "fake_command"
command_output = "fake command output"
mock_proc = mock.MagicMock(spec=subprocess.Popen)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
output = adb_utils._fastboot_command(command, DEVICE_FASTBOOT_SERIAL)
self.assertEqual(command_output, output)
mock_get_fastboot_path.assert_called()
mock_exists.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_304_adb_utils_fastboot_command_with_string_command_unicode(
self, mock_get_fastboot_path, mock_exists):
"""Verify _fastboot_command with unicode string command."""
command = u"fake_command"
command_output = "fake command output"
mock_proc = mock.MagicMock(spec=subprocess.Popen)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
output = adb_utils._fastboot_command(command, DEVICE_FASTBOOT_SERIAL)
self.assertEqual(command_output, output)
mock_get_fastboot_path.assert_called()
mock_exists.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_305_adb_utils_fastboot_command_with_list_command(
self, mock_get_fastboot_path, mock_exists):
"""Verify _fastboot_command with command list."""
command = ["fake_command", "arg1"]
command_output = "fake output"
mock_proc = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
output = adb_utils._fastboot_command(command, DEVICE_FASTBOOT_SERIAL)
self.assertEqual(command_output, output)
mock_get_fastboot_path.assert_called()
mock_exists.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_306_adb_utils_fastboot_command_with_tuple_command(
self, mock_get_fastboot_path, mock_exists):
"""Verify _fastboot_command with command tuple."""
command = ("fake_command", "arg1")
command_output = "fake output"
mock_proc = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
output = adb_utils._fastboot_command(command, DEVICE_FASTBOOT_SERIAL)
self.assertEqual(command_output, output)
mock_get_fastboot_path.assert_called()
mock_exists.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(
adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_307_adb_utils_fastboot_command_include_return_code(
self, mock_get_fastboot_path, mock_exists):
"""Verify _fastboot_command include_return_code works."""
command = "fake_command"
command_output = "fake output"
command_return_code = 1
mock_proc = mock.MagicMock(
spec=subprocess.Popen, returncode=command_return_code)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
output, return_code = adb_utils._fastboot_command(
command, DEVICE_FASTBOOT_SERIAL, include_return_code=True)
self.assertEqual(command_output, output)
self.assertEqual(command_return_code, return_code)
mock_get_fastboot_path.assert_called()
mock_exists.assert_called()
@mock.patch.object(adb_utils, "_fastboot_command")
def test_308_adb_utils_fastboot_unlock_device(self, mock_fastboot_command):
"""Verify fastbook_unlock_device calls _fastboot_command correctly."""
fastboot_serial = "fake_fastboot_serial"
fastboot_path = FASTBOOT_CMD_PATH
fastboot_timeout = 30.0
adb_utils.fastboot_unlock_device(
fastboot_serial, fastboot_path=fastboot_path, timeout=fastboot_timeout)
mock_fastboot_command.assert_called()
mock_fastboot_command.assert_called_with(("flashing", "unlock"),
fastboot_serial=fastboot_serial,
fastboot_path=fastboot_path,
timeout=fastboot_timeout)
@mock.patch.object(adb_utils, "_fastboot_command")
def test_309_adb_utils_fastboot_lock_device(self, mock_fastboot_command):
"""Verify fastbook_lock_device calls _fastboot_command correctly."""
fastboot_serial = "fake_fastboot_serial"
fastboot_path = FASTBOOT_CMD_PATH
fastboot_timeout = 30.0
adb_utils.fastboot_lock_device(
fastboot_serial, fastboot_path=fastboot_path, timeout=fastboot_timeout)
mock_fastboot_command.assert_called()
mock_fastboot_command.assert_called_with(("flashing", "lock"),
fastboot_serial=fastboot_serial,
fastboot_path=fastboot_path,
timeout=fastboot_timeout)
@mock.patch.object(adb_utils, "_fastboot_command")
def test_310_adb_utils_fastboot_wipe_userdata(self, mock_fastboot_command):
"""Verify fastboot_wipe_userdata calls _fastboot_command correctly."""
fastboot_serial = "fake_fastboot_serial"
fastboot_path = FASTBOOT_CMD_PATH
fastboot_timeout = 30.0
adb_utils.fastboot_wipe_userdata(
fastboot_serial, fastboot_path=fastboot_path, timeout=fastboot_timeout)
mock_fastboot_command.assert_called()
mock_fastboot_command.assert_called_with(
"-w",
fastboot_serial=fastboot_serial,
fastboot_path=fastboot_path,
timeout=fastboot_timeout)
@mock.patch.object(
adb_utils,
"_adb_command",
return_value="connected to aabbccdd")
def test_311_adb_connect(self, mock_adb_command):
"""Verify adb connect method."""
adb_utils.connect(DEVICE_ADB_SERIAL)
@mock.patch.object(
adb_utils,
"_adb_command",
return_value="unable to connect")
def test_312_adb_connect_failure_to_connect(self, mock_adb_command):
"""Verify adb connect method."""
with self.assertRaises(errors.DeviceError):
adb_utils.connect(DEVICE_ADB_SERIAL)
  @mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
  def test_313_adb_command_terminate(self, mock_get_adb_path):
    """Verify the adb subprocess is terminated when the command times out."""
    command = "fake_command"
    command_output = "fake output\n"
    mock_proc = mock.MagicMock(spec=subprocess.Popen, returncode=0)
    # side_effect takes precedence over return_value, so communicate()
    # raises TimeoutExpired; the return_value below is never consumed.
    mock_proc.communicate.side_effect = subprocess.TimeoutExpired(
        cmd=command, timeout=1)
    mock_proc.communicate.return_value = (command_output.encode(
        "utf-8", errors="replace"), None)
    with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
      with mock.patch.object(mock_proc, "terminate") as mock_terminate:
        with self.assertRaises(subprocess.TimeoutExpired):
          adb_utils.shell(DEVICE_ADB_SERIAL, command, timeout=1)
        # On timeout the hung process must be cleaned up exactly once.
        mock_terminate.assert_called_once()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_314_adb_shell_retry_failed(self, mock_adb_command):
"""Verify shell works as expected."""
command_output = "error: closed"
mock_proc = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_proc.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_proc):
with self.assertRaises(errors.DeviceError):
adb_utils.shell('echo "fake"', "12345")
@mock.patch.object(adb_utils, "_adb_command", return_value=("Output", 0))
def test_320_adb_utils_add_port_forwarding_success(self, mock_adb_command):
"""Verifies add_port_forwarding on success."""
output = adb_utils.add_port_forwarding(host_port=123,
device_port=456,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_once_with(
("forward", "tcp:123", "tcp:456"),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH,
include_return_code=True)
self.assertEqual(output, "Output")
@mock.patch.object(adb_utils, "_adb_command", return_value=("Error", 1))
def test_321_adb_utils_add_port_forwarding_exception(self, mock_adb_command):
"""Verifies add_port_forwarding raises exception."""
with self.assertRaises(RuntimeError):
adb_utils.add_port_forwarding(host_port=123,
device_port=456,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_once_with(
("forward", "tcp:123", "tcp:456"),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH,
include_return_code=True)
@mock.patch.object(adb_utils, "_adb_command", return_value=("Output", 0))
def test_325_adb_utils_remove_port_forwarding_success(self, mock_adb_command):
"""Verifies remove_port_forwarding on success."""
output = adb_utils.remove_port_forwarding(host_port=123,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_once_with(
("forward", "--remove", "tcp:123"),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH,
include_return_code=True)
self.assertEqual(output, "Output")
@mock.patch.object(adb_utils, "_adb_command", return_value=("Error", 1))
def test_326_adb_utils_remove_port_forwarding_exception(self,
mock_adb_command):
"""Verifies remove_port_forwarding on raise exception."""
with self.assertRaises(RuntimeError):
adb_utils.remove_port_forwarding(host_port=123,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_once_with(
("forward", "--remove", "tcp:123"),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH,
include_return_code=True)
@mock.patch.object(adb_utils, "_fastboot_command")
def test_330_adb_utils_fastboot_check_is_unlocked(self,
mock_fastboot_command):
"""Verifies fastboot_check_is_unlocked function return correct result."""
fastboot_serial = "fake_fastboot_serial"
unlocked_output = "unlocked: yes"
locked_output = "unlocked: no"
mock_fastboot_command.return_value = unlocked_output
unlocked_expected = adb_utils.fastboot_check_is_unlocked(
fastboot_serial=fastboot_serial)
mock_fastboot_command.return_value = locked_output
locked_expected = adb_utils.fastboot_check_is_unlocked(
fastboot_serial=fastboot_serial)
self.assertTrue(unlocked_expected)
self.assertFalse(locked_expected)
@mock.patch.object(adb_utils, "_fastboot_command")
def test_331_adb_utils_fastboot_check_is_unlocked_exception(
self, mock_fastboot_command):
"""Verifies fastboot_check_is_unlocked function raises with bad output."""
fastboot_serial = "fake_fastboot_serial"
unknown_output = "something went wrong"
mock_fastboot_command.return_value = unknown_output
with self.assertRaises(RuntimeError):
adb_utils.fastboot_check_is_unlocked(fastboot_serial=fastboot_serial)
@mock.patch.object(
adb_utils, "_adb_command", return_value=("bugreport output\n", 0))
def test_340_adb_utils_bugreport(self, mock_adb_command):
"""Verifies bugreport."""
adb_utils.bugreport(DEVICE_ADB_SERIAL)
mock_adb_command.assert_called()
@mock.patch.object(
adb_utils, "_adb_command", return_value=("bugreport output\n", 1))
def test_341_adb_utils_bugreport_bad_returncode(
self, mock_adb_command):
"""Verifies bugreport raises if ADB command fails."""
with self.assertRaises(RuntimeError):
adb_utils.bugreport(DEVICE_ADB_SERIAL)
mock_adb_command.assert_called()
@mock.patch.object(adb_utils, "_adb_command")
@mock.patch.object(os.path, "exists", return_value=False)
def test_342_adb_utils_pull_from_device_bad_destination_path(
self, mock_os_path_exists, mock_adb_command):
"""Verifies bugreport provided bad destination path."""
destination_path = "/bogus/path"
with self.assertRaises(ValueError):
adb_utils.bugreport(DEVICE_ADB_SERIAL, destination_path=destination_path)
mock_os_path_exists.assert_called()
mock_adb_command.assert_not_called()
# Allow running this test module directly; delegates to the project's
# unit_test_case runner.
if __name__ == "__main__":
  unit_test_case.main()
| 47.159926 | 80 | 0.717657 | 49,119 | 0.957299 | 0 | 0 | 48,707 | 0.949269 | 0 | 0 | 10,528 | 0.205184 |
63cacc17d61840c87378bcbce1b36a18495e24a2 | 1,269 | py | Python | webapp/apps/dynamic/urls.py | codekansas/PolicyBrain | 83c459db883536bae70cc78ca92ebdcff554ac2d | [
"MIT"
] | null | null | null | webapp/apps/dynamic/urls.py | codekansas/PolicyBrain | 83c459db883536bae70cc78ca92ebdcff554ac2d | [
"MIT"
] | null | null | null | webapp/apps/dynamic/urls.py | codekansas/PolicyBrain | 83c459db883536bae70cc78ca92ebdcff554ac2d | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, include, url
from .views import (show_job_submitted, dynamic_input, dynamic_finished,
ogusa_results, dynamic_landing, dynamic_behavioral,
behavior_results, edit_dynamic_behavioral, elastic_results,
dynamic_elasticities, edit_dynamic_elastic)
# URL routes for the dynamic-simulation app. Entries are matched top to bottom.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10 — this module only works on older Django versions.
urlpatterns = patterns('',
    url(r'^results/(?P<pk>\d+)/', ogusa_results, name='ogusa_results'),
    url(r'^(?P<pk>\d+)/', dynamic_landing, name='dynamic_landing'),
    url(r'^ogusa/(?P<pk>\d+)/', dynamic_input, name='dynamic_input'),
    url(r'^behavioral/(?P<pk>\d+)/', dynamic_behavioral, name='dynamic_behavioral'),
    url(r'^behavioral/edit/(?P<pk>\d+)/', edit_dynamic_behavioral, name='edit_dynamic_behavioral'),
    url(r'^macro/edit/(?P<pk>\d+)/', edit_dynamic_elastic, name='edit_dynamic_elastic'),
    url(r'^macro/(?P<pk>\d+)/', dynamic_elasticities, name='dynamic_elasticities'),
    url(r'^submitted/(?P<pk>\d+)/', show_job_submitted, name='show_job_submitted'),
    url(r'^macro_results/(?P<pk>\d+)/', elastic_results, name='elastic_results'),
    url(r'^behavior_results/(?P<pk>\d+)/', behavior_results, name='behavior_results'),
    url(r'^dynamic_finished/', dynamic_finished, name='dynamic_finished'),
)
| 57.681818 | 99 | 0.684791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 491 | 0.386919 |
63cb3e72171de82701ab61371e19e92285bb291d | 8,866 | py | Python | gbdtmo/gbdtmo.py | samanemami/GBDTMO | 33ee163d5db4dd71dae620c8e1f8295ed33c0a24 | [
"MIT"
] | 2 | 2021-09-15T16:18:15.000Z | 2022-01-12T10:35:18.000Z | gbdtmo/gbdtmo.py | samanemami/GBDTMO | 33ee163d5db4dd71dae620c8e1f8295ed33c0a24 | [
"MIT"
] | null | null | null | gbdtmo/gbdtmo.py | samanemami/GBDTMO | 33ee163d5db4dd71dae620c8e1f8295ed33c0a24 | [
"MIT"
] | null | null | null | import numpy as np
import numpy.ctypeslib as npct
import ctypes
from .histogram import get_bins_maps
from .lib_utils import *
class BoostUtils:
    """Shared plumbing for the GBDT front-ends: holds the ctypes library
    handle and the opaque native booster node, and forwards buffers to it.
    """

    def __init__(self, lib):
        self.lib = lib
        self._boostnode = None  # opaque native handle, created later by set_booster

    def _set_gh(self, g, h):
        # Hand the gradient/hessian buffers to the native side.
        self.lib.SetGH(self._boostnode, g, h)

    def _set_bin(self, bins):
        # One uint16 bin count per feature, plus every bin edge flattened
        # into a single contiguous array.
        counts = np.array([len(edges) for edges in bins], np.uint16)
        flat_edges = np.concatenate(bins, axis=0)
        self.lib.SetBin(self._boostnode, counts, flat_edges)

    def _set_label(self, x: np.array, is_train: bool):
        # Choose the native setter and matching ctypes array types from the
        # label dtype, then fix the argtypes according to the label rank.
        if x.dtype == np.float64:
            setter = self.lib.SetLabelDouble
            one_d, two_d = array_1d_double, array_2d_double
        elif x.dtype == np.int32:
            setter = self.lib.SetLabelInt
            one_d, two_d = array_1d_int, array_2d_int
        else:
            assert False, "dtype of label must be float64 or int32"
        if x.ndim == 1:
            setter.argtypes = [ctypes.c_void_p, one_d, ctypes.c_bool]
        elif x.ndim == 2:
            setter.argtypes = [ctypes.c_void_p, two_d, ctypes.c_bool]
        else:
            assert False, "label must be 1D or 2D array"
        setter(self._boostnode, x, is_train)

    def boost(self):
        # Forward a single Boost call to the native library.
        self.lib.Boost(self._boostnode)

    def dump(self, path):
        # Ask the native library to write the model to `path`.
        self.lib.Dump(self._boostnode, path)

    def load(self, path):
        # Ask the native library to read a model from `path`.
        self.lib.Load(self._boostnode, path)

    def train(self, num):
        # Run `num` native training iterations.
        self.lib.Train(self._boostnode, num)
class GBDTSingle(BoostUtils):
    """Front-end for the single-output booster (flat prediction vector);
    also supports one-vs-all multi-class training via `train_multi`.
    """

    def __init__(self, lib, out_dim=1, params={}):
        super(BoostUtils, self).__init__()
        BoostUtils.__init__(self, lib)
        self.out_dim = out_dim
        # Library defaults first, then user overrides; mirror every
        # parameter as an instance attribute for convenient access.
        merged = default_params()
        merged.update(params)
        self.params = merged
        self.__dict__.update(self.params)

    def set_booster(self, inp_dim):
        # Instantiate the native booster. The tuple order must match the
        # positional signature of the C function SingleNew.
        ordered = ('loss', 'max_depth', 'max_leaves', 'seed', 'min_samples',
                   'num_threads', 'lr', 'reg_l1', 'reg_l2', 'gamma',
                   'base_score', 'early_stop', 'verbose', 'hist_cache')
        self._boostnode = self.lib.SingleNew(
            inp_dim, *(self.params[key] for key in ordered))

    def set_data(self, train_set: tuple = None, eval_set: tuple = None):
        # Bind (data, label) arrays to the native booster; the label of
        # either set may be None.
        if train_set is not None:
            self.data, self.label = train_set
            n_train = len(self.data)
            self.set_booster(self.data.shape[-1])
            self.bins, self.maps = get_bins_maps(self.data, self.max_bins, self.num_threads)
            self._set_bin(self.bins)
            self.maps = np.ascontiguousarray(self.maps.transpose())
            self.preds_train = np.full(n_train * self.out_dim, self.base_score, dtype='float64')
            self.lib.SetData.argtypes = [
                ctypes.c_void_p, array_2d_uint16, array_2d_double,
                array_1d_double, ctypes.c_int, ctypes.c_bool]
            self.lib.SetData(self._boostnode, self.maps, self.data,
                             self.preds_train, n_train, True)
            if self.label is not None:
                self._set_label(self.label, True)
        if eval_set is not None:
            self.data_eval, self.label_eval = eval_set
            n_eval = len(self.data_eval)
            self.preds_eval = np.full(n_eval * self.out_dim, self.base_score, dtype='float64')
            # The eval set needs no bin maps; pass a dummy placeholder.
            dummy_maps = np.zeros((1, 1), 'uint16')
            self.lib.SetData(self._boostnode, dummy_maps, self.data_eval,
                             self.preds_eval, n_eval, False)
            if self.label_eval is not None:
                self._set_label(self.label_eval, False)

    def train_multi(self, num):
        '''
        Train `num` rounds in one-vs-all fashion; only used for
        multi-classification.
        '''
        assert self.out_dim > 1, "out_dim must bigger than 1"
        self.lib.TrainMulti(self._boostnode, num, self.out_dim)

    def predict(self, x, num_trees=0):
        # Returns a 1D vector when out_dim == 1, otherwise an
        # (n_samples, out_dim) matrix.
        preds = np.full(len(x) * self.out_dim, self.base_score, dtype='float64')
        if self.out_dim != 1:
            self.lib.PredictMulti(self._boostnode, x, preds, len(x), self.out_dim, num_trees)
            return np.reshape(preds, (self.out_dim, len(x))).transpose()
        self.lib.Predict.argtypes = [ctypes.c_void_p, array_2d_double, array_1d_double,
                                     ctypes.c_int, ctypes.c_int]
        self.lib.Predict(self._boostnode, x, preds, len(x), num_trees)
        return preds

    def reset(self):
        # Forward the Reset call to the native library.
        self.lib.Reset(self._boostnode)
class GBDTMulti(BoostUtils):
    """Front-end for the true multi-output booster; predictions and labels
    are (n_samples, out_dim) arrays.
    """

    def __init__(self, lib, out_dim=1, params={}):
        super(BoostUtils, self).__init__()
        BoostUtils.__init__(self, lib)
        self.out_dim = out_dim
        merged = default_params()
        merged.update(params)
        self.params = merged
        self.__dict__.update(self.params)

    def set_booster(self, inp_dim, out_dim):
        # NOTE(review): the `out_dim` parameter is ignored in favor of
        # self.out_dim, matching the original behavior.
        # Tuple order must match the positional signature of MultiNew.
        ordered = ('topk', 'loss', 'max_depth', 'max_leaves', 'seed',
                   'min_samples', 'num_threads', 'lr', 'reg_l1', 'reg_l2',
                   'gamma', 'base_score', 'early_stop', 'one_side',
                   'verbose', 'hist_cache')
        self._boostnode = self.lib.MultiNew(
            inp_dim, self.out_dim, *(self.params[key] for key in ordered))

    def set_data(self, train_set: tuple = None, eval_set: tuple = None):
        # Bind (data, label) arrays to the native booster; the label of
        # either set may be None.
        if train_set is not None:
            self.data, self.label = train_set
            n_train = len(self.data)
            self.set_booster(self.data.shape[-1], self.out_dim)
            self.bins, self.maps = get_bins_maps(self.data, self.max_bins, self.num_threads)
            self._set_bin(self.bins)
            self.maps = np.ascontiguousarray(self.maps.transpose())
            self.preds_train = np.full((n_train, self.out_dim), self.base_score, dtype='float64')
            self.lib.SetData.argtypes = [
                ctypes.c_void_p, array_2d_uint16, array_2d_double,
                array_2d_double, ctypes.c_int, ctypes.c_bool]
            self.lib.SetData(self._boostnode, self.maps, self.data,
                             self.preds_train, n_train, True)
            if self.label is not None:
                self._set_label(self.label, True)
        if eval_set is not None:
            self.data_eval, self.label_eval = eval_set
            n_eval = len(self.data_eval)
            self.preds_eval = np.full((n_eval, self.out_dim), self.base_score, dtype='float64')
            # The eval set needs no bin maps; pass a dummy placeholder.
            dummy_maps = np.zeros((1, 1), 'uint16')
            self.lib.SetData(self._boostnode, dummy_maps, self.data_eval,
                             self.preds_eval, n_eval, False)
            if self.label_eval is not None:
                self._set_label(self.label_eval, False)

    def predict(self, x, num_trees=0):
        # Predictions come back as an (n_samples, out_dim) float64 array.
        preds = np.full((len(x), self.out_dim), self.base_score, dtype='float64')
        self.lib.Predict.argtypes = [ctypes.c_void_p, array_2d_double, array_2d_double,
                                     ctypes.c_int, ctypes.c_int]
        self.lib.Predict(self._boostnode, x, preds, len(x), num_trees)
        return preds
| 46.663158 | 108 | 0.518498 | 8,732 | 0.984886 | 0 | 0 | 0 | 0 | 0 | 0 | 539 | 0.060794 |
63cb9cfcd1d1bed86874e36912a9244d3c5563c5 | 443 | py | Python | hold-tests/test_role_ans_dev.py | pahoughton/ansible-pahoughton | dba7c014e43fa232bea05b84f96d5b9115800089 | [
"CC-BY-3.0"
] | null | null | null | hold-tests/test_role_ans_dev.py | pahoughton/ansible-pahoughton | dba7c014e43fa232bea05b84f96d5b9115800089 | [
"CC-BY-3.0"
] | null | null | null | hold-tests/test_role_ans_dev.py | pahoughton/ansible-pahoughton | dba7c014e43fa232bea05b84f96d5b9115800089 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python3
# 2018-10-13 (cc) <paul4hough@gmail.com>
'''
pips:
- testinfra
- molecule
- tox
'''
# NOTE(review): this class is not executable — `packages`, `yaml.array`,
# `pkgs`, `common`, `major`, `pips` and `validate` are all undefined here,
# so the class body raises NameError at import time. It reads like
# pseudo-code sketching a future testinfra/molecule check — confirm intent
# with the author before relying on it.
class test_role_ans_dev (object):
    ''' test_role_ans_dev useless class
    '''
    assert packages.installed(
        yaml.array( pkgs[common],
                    pkgs[os][common],
                    pkgs[os][major], )
    )
    assert pips.installed()
    assert validate.python("https://github.com/python/stuff")
| 17.72 | 61 | 0.573363 | 326 | 0.735892 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.419865 |
63cba68bb6cb48b86439201796c5193de0beeb23 | 8,310 | py | Python | padertorch/contrib/cb/summary.py | jensheit/padertorch | 5827ec6ee768c32ae97348050846c7988b6bd5fa | [
"MIT"
] | null | null | null | padertorch/contrib/cb/summary.py | jensheit/padertorch | 5827ec6ee768c32ae97348050846c7988b6bd5fa | [
"MIT"
] | null | null | null | padertorch/contrib/cb/summary.py | jensheit/padertorch | 5827ec6ee768c32ae97348050846c7988b6bd5fa | [
"MIT"
] | null | null | null |
import collections
import torch
import einops
import cached_property
import padertorch as pt
# loss: torch.Tenso r =None,
# losses: dict =None,
# scalars: dict =None,
# histograms: dict =None,
# audios: dict =None,
# images: dict =None,
class ReviewSummary(collections.abc.Mapping):
    """
    Mutable mapping that collects review data for a training step
    (loss, scalars, audios, images, texts, histograms) in the dict layout
    expected by padertorch's SummaryHook.

    >>> review_summary = ReviewSummary()
    >>> review_summary
    ReviewSummary(prefix='', _data={})
    """
    # Allowed top-level keys: the summary sections SummaryHook knows about,
    # plus 'loss' and 'losses'.
    _keys = set(pt.train.hooks.SummaryHook.empty_summary_dict().keys()) | {
        'loss', 'losses'
    }
    def __init__(self, prefix='', _data=None, sampling_rate=None, visible_dB=60):
        # prefix: prepended to every entry name (e.g. 'train_').
        # _data: optional pre-existing summary dict to wrap (not copied).
        # sampling_rate: default sampling rate for add_audio.
        # visible_dB: forwarded to pt.summary.stft_to_image /
        #     spectrogram_to_image (presumably the visible dynamic range).
        if _data is None:
            _data = {}
        self.data = _data
        self.prefix = prefix
        self.sampling_rate = sampling_rate
        self.visible_dB = visible_dB
    def add_to_loss(self, value):
        # Accumulate into the scalar 'loss' entry; non-finite values are
        # rejected early so NaNs do not silently poison the total.
        assert torch.isfinite(value), value
        if 'loss' in self.data:
            self.data['loss'] = self.data['loss'] + value
        else:
            self.data['loss'] = value
    def add_scalar(self, name, *value):
        # Append every given value (converted to numpy, detached) under this
        # tag; reduction (presumably averaging) happens downstream in the
        # summary hook — TODO confirm.
        value = pt.data.batch.example_to_numpy(value, detach=True)
        self.data.setdefault(
            'scalars',
            {}
        ).setdefault(
            f'{self.prefix}{name}',
            []
        ).extend(value)
    def add_audio(self, name, signal, sampling_rate=None, batch_first=None,
                  normalize=True):
        # Store one audio entry; falls back to the instance-wide sampling
        # rate when none is given.
        if sampling_rate is None:
            sampling_rate = self.sampling_rate
        assert sampling_rate is not None, sampling_rate
        audio = pt.summary.audio(
            signal=signal, sampling_rate=sampling_rate,
            batch_first=batch_first, normalize=normalize
        )
        self.data.setdefault(
            'audios',
            {}
        )[f'{self.prefix}{name}'] = audio
    def add_text(self, name, text):
        # Store one text entry; only plain strings are accepted.
        assert isinstance(text, str), (type(text), text)
        self.data.setdefault(
            'texts',
            {}
        )[f'{self.prefix}{name}'] = text
    def _rearrange(self, array, rearrange):
        # Optionally apply an einops rearrange pattern before imaging.
        if rearrange is not None:
            return einops.rearrange(array, rearrange)
        else:
            return array
    def add_image(self, name, image):
        # Save the last added value (overwrites an earlier entry of the
        # same name). Expects a (channel, height, width) image.
        image = pt.utils.to_numpy(image, detach=True)
        if image.ndim != 3:
            raise AssertionError(
                'Did you forgot to call "pt.summary.*_to_image"?\n'
                f'Expect ndim == 3, got shape {image.shape}.'
            )
        self.data.setdefault(
            'images',
            {}
        )[f'{self.prefix}{name}'] = image
    def add_stft_image(
            self, name, signal,
            *, batch_first=None, color='viridis', rearrange=None):
        # Convert an STFT signal to a colored image and store it.
        signal = self._rearrange(signal, rearrange)
        image = pt.summary.stft_to_image(signal, batch_first=batch_first, color=color, visible_dB=self.visible_dB)
        self.add_image(name, image)
    def add_spectrogram_image(
            self, name, signal,
            *, batch_first=None, color='viridis', rearrange=None):
        # Convert a spectrogram to a colored image and store it.
        signal = self._rearrange(signal, rearrange)
        image = pt.summary.spectrogram_to_image(signal, batch_first=batch_first, color=color, visible_dB=self.visible_dB)
        self.add_image(name, image)
    def add_mask_image(self, name, mask, *, batch_first=None, color='viridis', rearrange=None):
        # Convert a mask (values presumably in [0, 1] — TODO confirm) to a
        # colored image and store it.
        mask = self._rearrange(mask, rearrange)
        image = pt.summary.mask_to_image(mask, batch_first=batch_first, color=color)
        self.add_image(name, image)
    def add_histogram(self, name, values):
        # Append one value batch to the named histogram entry.
        value = pt.utils.to_numpy(values, detach=True)
        self.data.setdefault(
            'histograms',
            {}
        ).setdefault(
            f'{self.prefix}{name}',
            []
        ).append(value)
    def __contains__(self, item):
        return item in self.data
    def __getitem__(self, key):
        # Direct item access is restricted to the known summary sections.
        assert key in self._keys, (key, self._keys)
        return self.data[key]
    def __setitem__(self, key, value):
        assert key in self._keys, (key, self._keys)
        self.data[key] = value
    def get(self, item, default):
        # Like dict.get, but `default` is mandatory here.
        if item in self:
            return self.data[item]
        else:
            return default
    def pop(self, *args, **kwargs):
        """pop(key[, default])"""
        return self.data.pop(*args, **kwargs)
    def setdefault(self, key, default):
        # NOTE(review): returns None, unlike dict.setdefault.
        self.data.setdefault(key, default)
    def __iter__(self):
        return iter(self.data)
    def __len__(self):
        return len(self.data)
    def __repr__(self):
        return f'{self.__class__.__name__}(prefix={self.prefix!r}, _data={dict(self)!r})'
    def _repr_pretty_(self, p, cycle):
        """
        IPython pretty-printing hook.

        >>> review_summary = ReviewSummary()
        >>> review_summary.add_to_loss(1)
        >>> review_summary.add_scalar('abc', 2)
        >>> review_summary
        ReviewSummary(prefix='', _data={'loss': 1, 'scalars': {'abc': [2]}})
        >>> from IPython.lib.pretty import pprint
        >>> pprint(review_summary)
        ReviewSummary(prefix='', _data={'loss': 1, 'scalars': {'abc': [2]}})
        >>> pprint(review_summary, max_width=79-18)
        ReviewSummary(
            prefix='',
            _data={'loss': 1, 'scalars': {'abc': [2]}}
        )
        >>> pprint(review_summary, max_width=79-40)
        ReviewSummary(
            prefix='',
            _data={'loss': 1,
                   'scalars': {'abc': [2]}}
        )
        """
        if cycle:
            p.text(f'{self.__class__.__name__}(...)')
        else:
            txt = f'{self.__class__.__name__}('
            with p.group(4, txt, ''):
                p.breakable(sep='')
                p.text('prefix=')
                p.pretty(self.prefix)
                p.text(',')
                p.breakable()
                txt = '_data='
                with p.group(len(txt), txt, ''):
                    p.pretty(dict(self))
                p.breakable('')
            p.text(')')
    class _Plotter:
        # Matplotlib helper bound to one ReviewSummary (see `plot` below).
        def __init__(self, review: 'ReviewSummary'):
            self.review = review
        def image(
                self, key, origin='lower', **kwargs
        ):
            # Show one stored (chw) image; flips vertically for
            # origin='lower' so imshow displays it upright.
            import numpy as np
            import matplotlib.pyplot as plt
            kwargs = {
                'origin': origin,
                **kwargs,
            }
            if key not in self.review['images']:
                from paderbox.utils.mapping import DispatchError
                raise DispatchError(key, self.review['images'].keys())
            X = np.einsum('chw->hwc', self.review['images'][key])
            if origin == 'lower':
                X = X[::-1]
            else:
                assert origin == 'upper'
            # ToDo: Where is AxesImage defined?
            ax: 'plt.AxesImage' = plt.imshow(
                X,
                **kwargs,
            )
            # ax.set_title(key)
            plt.title(key)
            plt.grid(False)
            return ax
        def images(
                self,
                columns=1,
                font_scale=1.0,
                line_width=3,
                figure_size=(8.0, 6.0),
        ):
            # Plot every stored image into one paderbox axes grid.
            from paderbox.visualization import axes_context
            from paderbox.visualization.context_manager import _AxesHandler
            with axes_context(
                columns=columns,
                font_scale=font_scale,
                line_width=line_width,
                figure_size=figure_size,
            ) as axes:
                axes: _AxesHandler
                for k in self.review['images']:
                    axes.new.grid(False) # set gca
                    self.image(k)
    @cached_property.cached_property
    def plot(self):
        # Lazily construct one _Plotter per summary instance.
        return self._Plotter(self)
    def play(self, key=None):
        # Play one named audio entry, or all stored entries when key is None.
        if key is None:
            for k in self['audios'].keys():
                self.play(k)
        elif key in self['audios']:
            from paderbox.io.play import play
            data, sample_rate = self['audios'][key]
            play(data, sample_rate=sample_rate, name=key)
        else:
            from paderbox.utils.mapping import DispatchError
            raise DispatchError(key, self['audios'].keys())
| 31.240602 | 121 | 0.536221 | 8,065 | 0.970517 | 0 | 0 | 87 | 0.010469 | 0 | 0 | 1,780 | 0.2142 |
63cc52215ee9f6507c0099207c8677db1e3b4142 | 907 | py | Python | venv/FlightPlan2.py | zowegoop/DroneMapping | 79d8645f0050cc0114e733d16e44e354e1a959c5 | [
"MIT"
] | null | null | null | venv/FlightPlan2.py | zowegoop/DroneMapping | 79d8645f0050cc0114e733d16e44e354e1a959c5 | [
"MIT"
] | null | null | null | venv/FlightPlan2.py | zowegoop/DroneMapping | 79d8645f0050cc0114e733d16e44e354e1a959c5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Rescue mission in the mountains 2
#
# Requires the third-party `djitellopy` package (pip3 install djitellopy);
# run on a machine connected to the Tello drone's Wi-Fi.
from djitellopy import Tello
from time import sleep

######## PARAMETERS ###########
fspeed = 117 / 10   # forward speed in cm/s
aspeed = 360 / 10   # angular speed in degrees/s
interval = 0.25     # control-loop tick in seconds
# Distance/angle covered per tick.
# NOTE(review): the original referenced undefined names `fSpeed`/`aSpeed`
# here (case mismatch), which raised NameError before the drone ever moved.
dInterval = fspeed * interval
aInterval = aspeed * interval
###############################

# Create the drone object, connect and take off.
# NOTE(review): the original sent all flight commands through an undefined
# `tello` object; the instance is named `drone`, so they are fixed below.
drone = Tello()
drone.connect()
drone.takeoff()

# Climb for ~3.8 s at 50 cm/s (target altitude roughly 6 ft / 190 cm).
drone.send_rc_control(0, 0, 50, 0)
sleep(3.8)

# Rotate counter-clockwise for ~0.43 s at yaw rate -50.
drone.send_rc_control(0, 0, 0, -50)
sleep(.43)

# Move forward for 4 s (~436 cm / 14.3 ft), then stop.
drone.send_rc_control(0, 50, 0, 0)
sleep(4)
drone.send_rc_control(0, 0, 0, 0)

# Land the drone.
drone.land()
63cd13cafbe9b72881384584902ce2c4c485f091 | 43,992 | py | Python | ubertool/terrplant/tests/test_terrplant_unittest.py | qed-uber/ubertool | 472a143e110f634afdfe03d503e5f442b1e57b86 | [
"Unlicense"
] | 2 | 2016-01-06T20:20:51.000Z | 2016-03-05T13:26:19.000Z | ubertool/terrplant/tests/test_terrplant_unittest.py | qed-uber/ubertool | 472a143e110f634afdfe03d503e5f442b1e57b86 | [
"Unlicense"
] | 21 | 2017-08-02T18:00:16.000Z | 2019-08-20T15:57:09.000Z | ubertool/terrplant/tests/test_terrplant_unittest.py | quanted/ubertool | 472a143e110f634afdfe03d503e5f442b1e57b86 | [
"Unlicense"
] | null | null | null | import datetime
import inspect
import numpy.testing as npt
import os.path
import pandas as pd
import pandas.util.testing as pdt
import sys
from tabulate import tabulate
import unittest
# #find parent directory and import model
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from ..terrplant_exe import Terrplant
# Scratch module-level dict; not referenced by the tests below — TODO confirm
# it is unused before removing.
test = {}
class TestTerrplant(unittest.TestCase):
"""
Unit tests for terrplant.
"""
print("terrplant unittests conducted at " + str(datetime.datetime.today()))
    def setUp(self):
        """
        Setup routine for terrplant unit tests.
        :return:
        """
        pass
        # setup the test as needed
        # e.g. pandas to open terrplant qaqc csv
        # Read qaqc csv and create pandas DataFrames for inputs and expected outputs
    def tearDown(self):
        """
        Teardown routine for terrplant unit tests.
        :return:
        """
        pass
        # teardown called after each test
        # e.g. maybe write test results to some text file
    def create_terrplant_object(self):
        # Factory helper: build a Terrplant model backed by empty input/output
        # DataFrames; each test assigns only the input Series it needs.
        # create empty pandas dataframes to create empty object for testing
        df_empty = pd.DataFrame()
        # create an empty terrplant object
        terrplant_empty = Terrplant(df_empty, df_empty)
        return terrplant_empty
# each of these functions are queued by "run_methods" and have outputs defined as properties in the terrplant qaqc csv
def test_terrplant_rundry(self):
"""
unittest for function terrplant.rundry
"""
#(self.application_rate/self.incorporation_depth) * self.runoff_fraction
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [0.5, 4.41, 6.048]
try:
terrplant_empty.application_rate = pd.Series([10, 21, 56], dtype='int')
terrplant_empty.incorporation_depth = pd.Series([2, 1, 4], dtype='int')
terrplant_empty.runoff_fraction = pd.Series([0.1, 0.21, 0.432 ], dtype='float')
result = terrplant_empty.run_dry()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_runsemi(self):
"""
unittest for function terrplant.runsemi
"""
#self.out_runsemi = (self.application_rate/self.incorporation_depth) * self.runoff_fraction * 10
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [5.0, 2.5, 19.0]
try:
terrplant_empty.application_rate = pd.Series([10, 20, 30], dtype='int')
terrplant_empty.incorporation_depth = pd.Series([2, 4, 3], dtype='int')
terrplant_empty.runoff_fraction = pd.Series([0.1, 0.05, 0.19], dtype='float')
result = terrplant_empty.run_semi()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_spray(self):
"""
unittest for function terrplant.spray
"""
#self.out_spray = self.application_rate * self.drift_fraction
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [5.0, 5.36, 19.05]
try:
terrplant_empty.application_rate = pd.Series([10, 20, 30], dtype='int')
terrplant_empty.drift_fraction = pd.Series([0.5, .268, 0.635], dtype='float')
result = terrplant_empty.spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_totaldry(self):
"""
unittest for function terrplant.totaldry
"""
#self.out_totaldry = self.out_rundry + self.out_spray
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results =[5.5, 15.65, 35.32]
try:
terrplant_empty.out_run_dry = pd.Series([0.5, 3.65, 12.32], dtype='float')
terrplant_empty.out_spray = pd.Series([5.0, 12.0, 23.0], dtype='float')
result = terrplant_empty.total_dry()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_totalsemi(self):
"""
unittest for function terrplant.totalsemi
"""
#self.out_totalsemi = self.out_runsemi + self.out_spray
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [5.034, 46.52, 71.669, ]
try:
terrplant_empty.out_run_semi = pd.Series([5.0, 12.32, 59.439], dtype='float')
terrplant_empty.out_spray = pd.Series([0.034, 34.2, 12.23], dtype='float')
result = terrplant_empty.total_semi()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nms_rq_dry(self):
"""
unittest for function terrplant.nms_rq_dry
"""
#self.out_nms_rq_dry = self.out_totaldry/self.ec25_nonlisted_seedling_emergence_monocot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [110.0, 1.45211, 0.0669796]
try:
terrplant_empty.out_total_dry = pd.Series([5.5, 17.89, 23.12345], dtype='float')
terrplant_empty.ec25_nonlisted_seedling_emergence_monocot = pd.Series([0.05, 12.32, 345.231], dtype='float')
result = terrplant_empty.nms_rq_dry()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nms_loc_dry(self):
"""
unittest for function terrplant.nms_loc_dry
"""
# if self.out_nms_rq_dry >= 1.0:
# self.out_nms_loc_dry = ('The risk quotient for non-listed monocot seedlings exposed to'\
# ' the pesticide via runoff to a dry area indicates a potential risk.')
# else:
# self.out_nms_loc_dry = ('The risk quotient for non-listed monocot seedlings exposed to'\
# ' the pesticide via runoff to a dry area indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for non-listed monocot seedlings exposed to the "
"pesticide via runoff to dry areas indicates a potential risk.",
"The risk quotient for non-listed monocot seedlings exposed to "
"the pesticide via runoff to dry areas indicates that potential "
"risk is minimal.", "The risk quotient for non-listed monocot "
"seedlings exposed to the pesticide via runoff to dry areas indicates "
"a potential risk."])
try:
terrplant_empty.out_nms_rq_dry = pd.Series([1.0, 0.5, 3.5], dtype='float')
result = terrplant_empty.loc_nms_dry()
pdt.assert_series_equal(result,expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nms_rq_semi(self):
"""
unittest for function terrplant.nms_rq_semi
"""
#self.out_nms_rq_semi = self.out_totalsemi/self.ec25_nonlisted_seedling_emergence_monocot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [200.0, 4.197279, 16.18354]
try:
terrplant_empty.out_total_semi = pd.Series([10., 1.234, 23.984], dtype='float')
terrplant_empty.ec25_nonlisted_seedling_emergence_monocot = pd.Series([0.05, 0.294, 1.482], dtype='float')
result = terrplant_empty.nms_rq_semi()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_out_nms_loc_semi(self):
"""
unittest for function terrplant.nms_loc_semi
"""
# if self.out_nms_rq_semi >= 1.0:
# self.out_nms_loc_semi = ('The risk quotient for non-listed monocot seedlings exposed to'\
# ' the pesticide via runoff to a semi-aquatic area indicates a potential risk.')
# else:
# self.out_nms_loc_semi = ('The risk quotient for non-listed monocot seedlings exposed to the'\
# ' pesticide via runoff to a semi-aquatic area indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for non-listed monocot seedlings exposed to the "
"pesticide via runoff to semi-aquatic areas indicates a potential "
"risk.", "The risk quotient for non-listed monocot seedlings exposed "
"to the pesticide via runoff to semi-aquatic areas indicates that "
"potential risk is minimal.", "The risk quotient for non-listed monocot "
"seedlings exposed to the pesticide via runoff to semi-aquatic areas "
"indicates a potential risk."])
try:
terrplant_empty.out_nms_rq_semi = pd.Series([1.0, 0.45, 2.7], dtype='float')
result = terrplant_empty.loc_nms_semi()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nms_rq_spray(self):
"""
unittest for function terrplant.nms_rq_spray
"""
#self.out_nms_rq_spray = self.out_spray/out__min_nms_spray
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [215.5062, 1.896628, 16.60117]
try:
terrplant_empty.out_spray = pd.Series([5.045, 2.43565, 9.04332], dtype='float')
terrplant_empty.out_min_nms_spray = pd.Series([0.02341, 1.2842, 0.54474], dtype='float')
result = terrplant_empty.nms_rq_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nms_loc_spray(self):
"""
unittest for function terrplant.nms_loc_spray
"""
# if self.out_nms_rq_spray >= 1.0:
# self.out_nms_loc_spray = ('The risk quotient for non-listed monocot seedlings exposed to'\
# ' the pesticide via spray drift indicates a potential risk.')
# else:
# self.out_nms_loc_spray = ('The risk quotient for non-listed monocot seedlings exposed to the'\
# ' pesticide via spray drift indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for non-listed monocot seedlings exposed to the pesticide via "
"spray drift indicates a potential risk.", "The risk quotient for non-listed monocot "
"seedlings exposed to the pesticide via spray drift indicates that potential risk "
"is minimal.", "The risk quotient for non-listed monocot seedlings exposed to the "
"pesticide via spray drift indicates a potential risk."])
try:
terrplant_empty.out_nms_rq_spray = pd.Series([2.2, 0.0056, 1.0], dtype='float')
result = terrplant_empty.loc_nms_spray()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lms_rq_dry(self):
"""
unittest for function terrplant.lms_rq_dry
"""
#self.out_lms_rq_dry = self.out_totaldry/self.ec25_nonlisted_seedling_emergence_dicot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [550.0, 3.40279, 234.0831]
try:
terrplant_empty.out_total_dry = pd.Series([5.5, 1.094, 19.5436], dtype='float')
terrplant_empty.noaec_listed_seedling_emergence_monocot = pd.Series([0.01, 0.3215, 0.08349], dtype='float')
result = terrplant_empty.lms_rq_dry()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lms_loc_dry(self):
"""
unittest for function terrplant.lms_loc_dry
"""
# if self.out_lms_rq_dry >= 1.0:
# self.out_lms_loc_dry = ('The risk quotient for listed monocot seedlings exposed to'\
# ' the pesticide via runoff to a dry area indicates a potential risk.')
# else:
# self.out_lms_loc_dry = ('The risk quotient for listed monocot seedlings exposed to the'\
# ' pesticide via runoff to a dry area indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for listed monocot seedlings exposed to the pesticide "
"via runoff to dry areas indicates a potential risk.", "The risk quotient "
"for listed monocot seedlings exposed to the pesticide via runoff to dry "
"areas indicates that potential risk is minimal.", "The risk quotient for "
"listed monocot seedlings exposed to the pesticide via runoff to dry areas "
"indicates a potential risk."])
try:
terrplant_empty.out_lms_rq_dry = pd.Series([1.6, 0.045, 1.0], dtype='float')
result = terrplant_empty.loc_lms_dry()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lms_rq_semi(self):
"""
unittest for function terrplant.lms_rq_semi
"""
#self.out_lms_rq_semi = self.out_totalsemi/self.ec25_nonlisted_seedling_emergence_dicot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [1000.0, 0.0217295, 72.19618]
try:
terrplant_empty.out_total_semi = pd.Series([10., 0.099, 24.5467], dtype='float')
terrplant_empty.noaec_listed_seedling_emergence_monocot = pd.Series([0.01, 4.556, 0.34], dtype='float')
result = terrplant_empty.lms_rq_semi()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lms_loc_semi(self):
"""
unittest for function terrplant.lms_loc_semi
"""
# if self.out_lms_rq_semi >= 1.0:
# self.out_lms_loc_semi = ('The risk quotient for listed monocot seedlings exposed to'\
# ' the pesticide via runoff to a semi-aquatic area indicates a potential risk.')
# else:
# self.out_lms_loc_semi = ('The risk quotient for listed monocot seedlings exposed to the'\
# ' pesticide via runoff to a semi-aquatic area indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for listed monocot seedlings exposed to the pesticide via "
"runoff to semi-aquatic areas indicates a potential risk.", "The risk quotient "
"for listed monocot seedlings exposed to the pesticide via runoff to "
"semi-aquatic areas indicates that potential risk is minimal.", "The risk "
"quotient for listed monocot seedlings exposed to the pesticide via runoff "
"to semi-aquatic areas indicates a potential risk."])
try:
terrplant_empty.out_lms_rq_semi = pd.Series([1.0, 0.9, 6.456], dtype= 'float')
result = terrplant_empty.loc_lms_semi()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lms_rq_spray(self):
"""
unittest for function terrplant.lms_rq_spray
"""
#self.out_lms_rq_spray = self.out_spray/self.ec25_nonlisted_seedling_emergence_dicot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [500.0, 3.754362, 0.04772294]
try:
terrplant_empty.out_spray = pd.Series([5., 9.1231, 0.09231], dtype='float')
terrplant_empty.out_min_lms_spray = pd.Series([0.01, 2.43, 1.93429], dtype='float')
result = terrplant_empty.lms_rq_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lms_loc_spray(self):
"""
unittest for function terrplant.lms_loc_spray
"""
# if self.out_lms_rq_spray >= 1.0:
# self.out_lms_loc_spray = ('The risk quotient for listed monocot seedlings exposed to'\
# ' the pesticide via spray drift indicates a potential risk.')
# else:
# self.out_lms_loc_spray = ('The risk quotient for listed monocot seedlings exposed to the'\
# ' pesticide via spray drift indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for listed monocot seedlings exposed "
"to the pesticide via spray drift indicates a potential "
"risk.", "The risk quotient for listed monocot seedlings "
"exposed to the pesticide via spray drift indicates that "
"potential risk is minimal.", "The risk quotient for "
"listed monocot seedlings exposed to the pesticide via "
"spray drift indicates a potential risk."])
try:
terrplant_empty.out_lms_rq_spray = pd.Series([1.1, 0.99, 3.129], dtype= 'float')
result = terrplant_empty.loc_lms_spray()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nds_rq_dry(self):
"""
unittest for function terrplant.nds_rq_dry
"""
#self.out_nds_rq_dry = self.out_totaldry/self.noaec_listed_seedling_emergence_monocot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [275., 1.012424, 9.062258]
try:
terrplant_empty.out_total_dry = pd.Series([5.5, 1.0023, 19.32436], dtype='float')
terrplant_empty.ec25_nonlisted_seedling_emergence_dicot = pd.Series([0.02, 0.99, 2.1324], dtype='float')
result = terrplant_empty.nds_rq_dry()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nds_loc_dry(self):
"""
unittest for function terrplant.nds_loc_dry
"""
# if self.out_nds_rq_dry >= 1.0:
# self.out_nds_loc_dry = ('The risk quotient for non-listed monocot seedlings exposed to'\
# ' the pesticide via runoff to dry areas indicates a potential risk.')
# else:
# self.out_nds_loc_dry = ('The risk quotient for non-listed monocot seedlings exposed to the'\
# ' pesticide via runoff to dry areas indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for non-listed dicot seedlings exposed to the "
"pesticide via runoff to dry areas indicates a potential "
"risk.", "The risk quotient for non-listed dicot seedlings "
"exposed to the pesticide via runoff to dry areas indicates "
"that potential risk is minimal.", "The risk quotient for "
"non-listed dicot seedlings exposed to the pesticide via runoff "
"to dry areas indicates a potential risk."])
try:
terrplant_empty.out_nds_rq_dry = pd.Series([2.7, 0.923, 1.0], dtype='float')
result = terrplant_empty.loc_nds_dry()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nds_rq_semi(self):
"""
unittest for function terrplant.nds_rq_semi
"""
#self.out_nds_rq_semi = self.out_totalsemi/self.noaec_listed_seedling_emergence_monocot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [500., 3.464141, 0.999986]
try:
terrplant_empty.out_total_semi = pd.Series([10., 3.4295, 12.82323], dtype='float')
terrplant_empty.ec25_nonlisted_seedling_emergence_dicot = pd.Series([0.02, 0.99, 12.8234], dtype='float')
result = terrplant_empty.nds_rq_semi()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nds_loc_semi(self):
"""
unittest for function terrplant.nds_loc_semi
"""
# if self.out_nds_rq_semi >= 1.0:
# self.out_nds_loc_semi = ('The risk quotient for non-listed monocot seedlings exposed to'\
# ' the pesticide via runoff to semi-aquatic areas indicates a potential risk.')
# else:
# self.out_nds_loc_semi = ('The risk quotient for non-listed monocot seedlings exposed to the'\
# ' pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for non-listed dicot seedlings exposed to the "
"pesticide via runoff to semi-aquatic areas indicates a potential "
"risk.", "The risk quotient for non-listed dicot seedlings exposed "
"to the pesticide via runoff to semi-aquatic areas indicates that "
"potential risk is minimal.", "The risk quotient for non-listed "
"dicot seedlings exposed to the pesticide via runoff to semi-aquatic "
"areas indicates a potential risk."])
try:
terrplant_empty.out_nds_rq_semi = pd.Series([1.7, 0.001, 2.3134], dtype='float')
result = terrplant_empty.loc_nds_semi()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nds_rq_spray(self):
"""
unittest for function terrplant.nds_rq_spray
"""
#self.out_nds_rq_spray = self.out_spray/self.noaec_listed_seedling_emergence_monocot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [235.5158, 0.2584818, 1.994142]
try:
terrplant_empty.out_spray = pd.Series([5., 0.9912, 23.9321], dtype='float')
terrplant_empty.out_min_nds_spray = pd.Series([0.02123, 3.8347, 12.0012], dtype='float')
result = terrplant_empty.nds_rq_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nds_loc_spray(self):
"""
unittest for function terrplant.nds_loc_spray
"""
# if self.out_nds_rq_spray >= 1.0:
# self.out_nds_loc_semi = ('The risk quotient for non-listed monocot seedlings exposed to'\
# ' the pesticide via spray drift indicates a potential risk.')
# else:
# self.out_nds_loc_semi = ('The risk quotient for non-listed monocot seedlings exposed to the'\
# ' pesticide via spray drift indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for non-listed dicot seedlings exposed to the "
"pesticide via spray drift indicates a potential risk.", "The "
"risk quotient for non-listed dicot seedlings exposed to the "
"pesticide via spray drift indicates that potential risk is "
"minimal.", "The risk quotient for non-listed dicot seedlings "
"exposed to the pesticide via spray drift indicates a potential risk."])
try:
terrplant_empty.out_nds_rq_spray = pd.Series([1.2, 0.439, 3.9921], dtype='float')
result = terrplant_empty.loc_nds_spray()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lds_rq_dry(self):
"""
unittest for function terrplant.lds_rq_dry
"""
#self.out_lds_rq_dry = self.out_totaldry/self.noaec_listed_seedling_emergence_dicot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [55., 1.001862, 6.043703]
try:
terrplant_empty.out_total_dry = pd.Series([5.5, 0.991843, 12.7643], dtype='float')
terrplant_empty.noaec_listed_seedling_emergence_dicot = pd.Series([0.1, .99, 2.112], dtype='float')
result = terrplant_empty.lds_rq_dry()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lds_loc_dry(self):
"""
unittest for function terrplant.lds_loc_dry
"""
# if self.out_lds_rq_dry >= 1.0:
# self.out_lds_loc_dry = ('The risk quotient for listed monocot seedlings exposed to'\
# ' the pesticide via runoff to dry areas indicates a potential risk.')
# else:
# self.out_lds_loc_dry = ('The risk quotient for listed monocot seedlings exposed to the'\
# ' pesticide via runoff to dry areas indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for listed dicot seedlings exposed to the "
"pesticide via runoff to dry areas indicates a potential "
"risk.", "The risk quotient for listed dicot seedlings exposed "
"to the pesticide via runoff to dry areas indicates that "
"potential risk is minimal.", "The risk quotient for listed "
"dicot seedlings exposed to the pesticide via runoff to dry "
"areas indicates a potential risk."])
try:
terrplant_empty.out_lds_rq_dry = pd.Series([1.5, 0.00856, 4.2893], dtype= 'float')
result = terrplant_empty.loc_lds_dry()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lds_rq_semi(self):
"""
unittest for function terrplant.lds_rq_semi
"""
#self.out_lds_rq_semi = self.out_totalsemi/self.noaec_listed_seedling_emergence_dicot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [100., 2502.0289, 16.08304]
try:
terrplant_empty.out_total_semi = pd.Series([10., 0.8632, 34.2321], dtype='float')
terrplant_empty.noaec_listed_seedling_emergence_dicot = pd.Series([0.1, 0.000345, 2.12846], dtype='float')
result = terrplant_empty.lds_rq_semi()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lds_loc_semi(self):
"""
unittest for function terrplant.lds_loc_semi
"""
# if self.out_lds_rq_semi >= 1.0:
# self.out_lds_loc_semi = ('The risk quotient for listed monocot seedlings exposed to'\
# ' the pesticide via runoff to semi-aquatic areas indicates a potential risk.')
# else:
# self.out_lds_loc_semi = ('The risk quotient for listed monocot seedlings exposed to the'\
# ' pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for listed dicot seedlings exposed to the "
"pesticide via runoff to semi-aquatic areas indicates a potential "
"risk.", "The risk quotient for listed dicot seedlings exposed to "
"the pesticide via runoff to semi-aquatic areas indicates that "
"potential risk is minimal.", "The risk quotient for listed dicot "
"seedlings exposed to the pesticide via runoff to semi-aquatic "
"areas indicates a potential risk."])
try:
terrplant_empty.out_lds_rq_semi = pd.Series([4.5, 0.0028, 1.0], dtype= 'float')
result = terrplant_empty.loc_lds_semi()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lds_rq_spray(self):
"""
unittest for function terrplant.lds_rq_spray
"""
#self.out_lds_rq_spray = self.out_spray/self.noaec_listed_seedling_emergence_dicot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [250., 0.7105719, 1.28799]
try:
terrplant_empty.out_spray = pd.Series([5.0, 0.94435, 12.7283], dtype='float')
terrplant_empty.out_min_lds_spray = pd.Series([0.02, 1.329, 9.8823], dtype='float')
result = terrplant_empty.lds_rq_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lds_loc_spray(self):
"""
unittest for function terrplant.lds_loc_spray
"""
# if self.out_lds_rq_spray >= 1.0:
# self.out_lds_loc_spray = ('The risk quotient for listed monocot seedlings exposed to'\
# ' the pesticide via spray drift indicates a potential risk.')
# else:
# self.out_lds_loc_spray = ('The risk quotient for listed monocot seedlings exposed to the'\
# ' pesticide via spray drift indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for listed dicot seedlings exposed to the "
"pesticide via spray drift indicates a potential risk.", "The "
"risk quotient for listed dicot seedlings exposed to the "
"pesticide via spray drift indicates that potential risk is "
"minimal.", "The risk quotient for listed dicot seedlings "
"exposed to the pesticide via spray drift indicates a potential "
"risk."])
try:
terrplant_empty.out_lds_rq_spray = pd.Series([1.8, 0.956, 3.25], dtype='float')
result = terrplant_empty.loc_lds_spray()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_min_nms_spray(self):
"""
unittest for function terrplant.min_nms_spray
"""
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [0.0501, 0.9999, 1.9450]
try:
terrplant_empty.ec25_nonlisted_seedling_emergence_monocot = pd.Series([0.0501, 1.0004, 12.943], dtype='float')
terrplant_empty.ec25_nonlisted_vegetative_vigor_monocot = pd.Series([0.0801, 0.9999, 1.9450], dtype='float')
result = terrplant_empty.min_nms_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_min_lms_spray(self):
"""
unittest for function terrplant.min_lms_spray
"""
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [0.0205, 1.9234, 0.000453]
try:
terrplant_empty.noaec_listed_vegetative_vigor_monocot = pd.Series([0.0211, 1.9234, 0.001112], dtype='float')
terrplant_empty.noaec_listed_seedling_emergence_monocot = pd.Series([0.0205, 3.231, 0.000453], dtype='float')
result = terrplant_empty.min_lms_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_min_nds_spray(self):
"""
unittest for function terrplant.min_nds_spray
"""
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [0.0325, 0.00342, 1.3456]
try:
terrplant_empty.ec25_nonlisted_vegetative_vigor_dicot = pd.Series([0.0325, 3.432, 1.3456], dtype='float')
terrplant_empty.ec25_nonlisted_seedling_emergence_dicot = pd.Series([0.5022, 0.00342, 1.34567], dtype='float')
result = terrplant_empty.min_nds_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_min_lds_spray(self):
"""
unittest for function terrplant.min_lds_spray
"""
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [0.3206, 1.00319, 12.32]
try:
terrplant_empty.noaec_listed_seedling_emergence_dicot = pd.Series([0.3206, 1.0032, 43.4294], dtype='float')
terrplant_empty.noaec_listed_vegetative_vigor_dicot = pd.Series([0.5872, 1.00319, 12.32], dtype='float')
result = terrplant_empty.min_lds_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
    # unittest will
    # 1) call the setup method,
    # 2) then call every method starting with "test",
    # 3) then the teardown method
    # NOTE(review): this __main__ guard appears at class-body indentation, so it
    # would execute while the class is still being defined; conventionally it
    # belongs at module level after the class -- confirm intended placement.
    if __name__ == '__main__':
        unittest.main()
63cd283958618a19e423133bb4fbcee1918c23d9 | 3,851 | py | Python | mnist.py | a25765564/MLtest | 03fdb97cd215fc2a1d6d584d76f74eb94ffb2266 | [
"MIT"
] | null | null | null | mnist.py | a25765564/MLtest | 03fdb97cd215fc2a1d6d584d76f74eb94ffb2266 | [
"MIT"
] | null | null | null | mnist.py | a25765564/MLtest | 03fdb97cd215fc2a1d6d584d76f74eb94ffb2266 | [
"MIT"
] | null | null | null | import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def xavier_init(fan_in,fan_out,constant = 1):
low = -constant * np.sqrt(6.0 /(fan_in + fan_out))
high = constant * np.sqrt(6.0 /(fan_in + fan_out))
return tf.random_uniform((fan_in,fan_out),minval= low,maxval = high,dtype = tf.float32)
class AdditivaGaussianNoiseAutoencoder(object):
def __init__(self,n_input,n_hidden,transfer_function= tf.nn.softplus,optimizer= tf.train.AdamOptimizer(),scale=0.1):
self.n_input = n_input
self.n_hidden = n_hidden
self.transfer = transfer_function
self.scale = tf.placeholder(tf.float32)
self.training_scale = scale
network_weights = self._initialize_weights()
self.weights = network_weights
self.x = tf.placeholder(tf.float32,[None,self.n_input])
self.hidden = self.transfer(tf.add(tf.matmul(self.x+scale * tf.random_normal((n_input,)),
self.weights['w1']),self.weights['b1']))
self.reconstruction = tf.add(tf.matmul(self.hidden,self.weights['w2']),self.weights['b2'])
self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction,self.x),2.0))
self.optimizer = optimizer.minimize(self.cost)
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.Variable(xavier_init(self.n_input,self.n_hidden))
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden],dtype = tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden,self.n_input],dtype = tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input],dtype = tf.float32))
return all_weights
def partial_fit(self,X):
cost, opt = self.sess.run((self.cost,self.optimizer),feed_dict= {self.x:X,self.scale:self.training_scale})
return cost
def calc_total_cost(self,X):
return self.sess.run(self.cost,feed_dict={self.x:X,self.scale:self.training_scale})
def transform(self,X):
return self.sess.run(self.hidden,feed_dict={self.x:X,self.scale:self.training_scale})
def generate(self,hidden = None):
if hidden is none:
hidden = np.random.normal(size = self.weights["b1"])
return self.sess.run(self.reconstruction,feed_dict= {self.hidden:hidden})
def reconstruct(self,X):
return self.sess.run(self.reconstruction,feed_dict={self.x:X,self.scale:self.training_scale})
def getWeights(self):
return self.sess.run(self.weights['w1'])
def getBisses(self):
return self.sess.run(self.weights['b1'])
# Download (if needed) and load MNIST into ./MNIST_DATA; labels come back one-hot encoded.
mnist = input_data.read_data_sets('MNIST_DATA',one_hot=True)
def standard_scale(X_train, X_test):
    """Standardise both sets to zero mean / unit variance.

    The scaler is fitted on the training set only, then applied to both,
    so no test-set statistics leak into training.
    """
    scaler = prep.StandardScaler().fit(X_train)
    return scaler.transform(X_train), scaler.transform(X_test)
def get_random_block_from_data(data, batch_size):
    """Return a random contiguous block of `batch_size` rows from `data`.

    The upper bound is `len(data) - batch_size + 1` (randint's `high` is
    exclusive), so the final block is reachable and the call no longer
    raises ValueError when `len(data) == batch_size` — the original's
    `randint(0, len(data) - batch_size)` had both defects.
    """
    start_index = np.random.randint(0, len(data) - batch_size + 1)
    return data[start_index:(start_index + batch_size)]
# Standardise the raw pixel data (scaler fitted on the training split).
# NOTE(review): "X_tain" is a typo for "X_train" but is used consistently below.
X_tain,X_test = standard_scale(mnist.train.images,mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 200
batch_size = 128
display_step = 1  # log every epoch
# 784 inputs (28x28 MNIST pixels) -> 200 hidden units, Adam optimiser,
# Gaussian corruption noise scale 0.01.
autoencoder = AdditivaGaussianNoiseAutoencoder(n_input = 784,n_hidden= 200,transfer_function = tf.nn.softplus,
                                               optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
                                               scale = 0.01)
for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples /batch_size)
    for i in range(total_batch):
        # random (not epoch-partitioned) mini-batches, sampled with replacement
        batch_xs = get_random_block_from_data(X_tain,batch_size)
        cost = autoencoder.partial_fit(batch_xs)
        # running average of per-sample cost across the epoch
        avg_cost += cost /n_samples * batch_size
    if epoch % display_step == 0:
        print("Epoch:",'%04d'%(epoch+1),"cost=","{:.9f}".format(avg_cost))
# Final reconstruction cost on the held-out test split.
print ("total cost:" + str(autoencoder.calc_total_cost(X_test)))
| 44.77907 | 118 | 0.731758 | 2,195 | 0.569982 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.025448 |
63cd37f1d19aa602021ac5d7d2664ac5f4cbcbd3 | 1,296 | py | Python | images/migrations/0001_initial.py | Hoofeycheng/Bookmarks | f2721633cd39393f0c92993579071679bb975ab0 | [
"MIT"
] | null | null | null | images/migrations/0001_initial.py | Hoofeycheng/Bookmarks | f2721633cd39393f0c92993579071679bb975ab0 | [
"MIT"
] | null | null | null | images/migrations/0001_initial.py | Hoofeycheng/Bookmarks | f2721633cd39393f0c92993579071679bb975ab0 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.4 on 2019-01-21 03:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('slug', models.CharField(blank=True, max_length=200)),
('image', models.ImageField(upload_to='image/%Y')),
('url', models.URLField()),
('description', models.TextField(blank=True)),
('created', models.DateField(auto_now_add=True, db_index=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images_created', to=settings.AUTH_USER_MODEL)),
('user_like', models.ManyToManyField(blank=True, related_name='image_liked', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'images',
},
),
]
| 37.028571 | 149 | 0.604938 | 1,137 | 0.877315 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.141204 |
63cd6a4844d309d64fcae3b172caa3687fb25c61 | 951 | py | Python | src/python/detectors/os_command_injection/os_command_injection.py | martinschaef/amazon-codeguru-reviewer-python-detectors | 7452471b7ac5e1f2e1a0bfbd0f615d98f160e0e4 | [
"Apache-2.0"
] | 18 | 2022-01-27T22:50:22.000Z | 2022-02-15T17:41:24.000Z | src/python/detectors/os_command_injection/os_command_injection.py | martinschaef/amazon-codeguru-reviewer-python-detectors | 7452471b7ac5e1f2e1a0bfbd0f615d98f160e0e4 | [
"Apache-2.0"
] | 1 | 2022-01-31T21:36:18.000Z | 2022-02-22T17:09:54.000Z | src/python/detectors/os_command_injection/os_command_injection.py | martinschaef/amazon-codeguru-reviewer-python-detectors | 7452471b7ac5e1f2e1a0bfbd0f615d98f160e0e4 | [
"Apache-2.0"
] | 7 | 2022-02-10T21:50:50.000Z | 2022-03-28T14:21:10.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# {fact rule=os-command-injection@v1.0 defects=1}
def exec_command_noncompliant():
    """Deliberately vulnerable example: runs a remote command built from
    unsanitized user input (OS command injection). Kept as-is — it is a
    negative test fixture for the os-command-injection detector rule."""
    from paramiko import client
    from flask import request
    address = request.args.get("address")
    cmd = "ping -c 1 %s" % address
    client = client.SSHClient()
    client.connect("ssh.samplehost.com")
    # Noncompliant: address argument is not sanitized.
    client.exec_command(cmd)
# {/fact}
# {fact rule=os-command-injection@v1.0 defects=0}
def exec_command_compliant():
    """Compliant example: the user-supplied address is shell-escaped with
    shlex.quote before being placed into the remote command."""
    import shlex  # fixed: shlex was used below but never imported (NameError at runtime)
    from paramiko import client
    from flask import request
    address = request.args.get("address")
    # Compliant: address argument is sanitized (shell-escaped).
    address = shlex.quote(request.args.get("address"))
    cmd = "ping -c 1 %s" % address
    client = client.SSHClient()
    client.connect("ssh.samplehost.com")
    client.exec_command(cmd)
# {/fact}
| 32.793103 | 69 | 0.701367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 427 | 0.449001 |
63cdddd1832970e9972b2c05b624ca631cca30ea | 922 | py | Python | src/Dao/Parts.py | asarfara/PartCost | 99b7485ad811d41ca86a61b922faf51facdabcd3 | [
"MIT"
] | null | null | null | src/Dao/Parts.py | asarfara/PartCost | 99b7485ad811d41ca86a61b922faf51facdabcd3 | [
"MIT"
] | null | null | null | src/Dao/Parts.py | asarfara/PartCost | 99b7485ad811d41ca86a61b922faf51facdabcd3 | [
"MIT"
] | null | null | null | import logging
import json
from typing import List
from src.Entity.Part import Part
from datetime import date
from sqlite3 import Connection
class Parts:
    """Data-access object that persists Part rows into the `parts_price` table."""

    def __init__(self, file_name: str, logger: logging.Logger, connection: Connection):
        # fixed annotation: the original annotated `logger` with the `logging`
        # module itself rather than the logging.Logger type
        self.file_name = file_name
        self.logger = logger
        self.connection = connection

    def insert_parts(self, parts: List[Part]):
        """Insert collection of parts into the database.

        Args:
            parts (List[Part]): Collection of parts.
        """
        # One timestamp per batch so every row of a single call shares the same date.
        insert_date = date.today()
        cursor = self.connection.cursor()
        for part in parts:
            # Lazy %-style args skip json.dumps when DEBUG is off; the original
            # message said "price_parts" but the actual table is "parts_price".
            self.logger.debug("Inserting into parts_price %s", json.dumps(part.__dict__))
            cursor.execute('INSERT INTO parts_price (name, price, supplier, type, date) VALUES (?,?,?,?,?)',
                           [part.name, part.price, part.supplier, part.type, insert_date])
        self.connection.commit()
        return None
| 28.8125 | 173 | 0.654013 | 777 | 0.842733 | 0 | 0 | 0 | 0 | 0 | 0 | 240 | 0.260304 |
63d0e3d64e13f20d2c0c373757cdad7580a1300b | 4,300 | py | Python | src/utils/plots.py | Light4Code/tensorflow-research | 392c2d7bc376f491fec68d479b130f883d6d028d | [
"MIT"
] | 5 | 2020-02-29T16:28:55.000Z | 2021-11-24T07:47:36.000Z | src/utils/plots.py | Light4Code/tensorflow-research | 392c2d7bc376f491fec68d479b130f883d6d028d | [
"MIT"
] | 3 | 2020-11-13T18:41:57.000Z | 2022-02-10T01:37:51.000Z | src/utils/plots.py | Light4Code/tensorflow-research | 392c2d7bc376f491fec68d479b130f883d6d028d | [
"MIT"
] | 4 | 2020-03-24T10:50:17.000Z | 2020-06-02T13:07:28.000Z | import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import utils.image_util as iu
from utils.custom_types import Vector
def plot_history(loss, acc, val_loss, val_acc):
    """Render training/validation loss (top) and accuracy (bottom) curves."""
    plt.figure(figsize=(20, 10))
    panels = (
        ("Loss", loss, val_loss),
        ("Accuracy", acc, val_acc),
    )
    for row, (title, train_series, test_series) in enumerate(panels, start=1):
        plt.subplot(2, 1, row)
        plt.title(title)
        plt.grid()
        plt.plot(train_series)
        plt.plot(test_series)
        plt.xlabel("Epoch")
        plt.ylabel(title)
        plt.legend(["Train", "Test"], loc="upper left")
    plt.show()
def plot_difference(
predictions, test_images, input_shape: Vector, threshold: float = 0.0
):
plt.figure(figsize=(20, 10))
pred_count = len(predictions)
plt_shape = (input_shape[0], input_shape[1])
plt_cmap = "gray"
if input_shape[2] > 1:
plt_shape = (
input_shape[0],
input_shape[1],
input_shape[2],
)
index = 1
plt_index = 0
for test_image in test_images:
original_image = test_image.reshape(plt_shape)
pred_image = predictions[plt_index].reshape(plt_shape)
diff, se = iu.create_diff(original_image, pred_image, threshold)
mask = ma.masked_where(diff == False, diff)
plt.subplot(pred_count, 4, index)
plt.title("Original")
plt.imshow(original_image, interpolation="none", cmap=plt_cmap)
index += 1
plt.subplot(pred_count, 4, index)
plt.title("Prediction")
plt.imshow(pred_image, interpolation="none", cmap=plt_cmap)
index += 1
plt.subplot(pred_count, 4, index)
plt.title("Diff (SE: {0})".format(round(se, 2)))
plt.imshow(diff, interpolation="none", cmap=plt_cmap)
index += 1
plt.subplot(pred_count, 4, index)
plt.title("Overlay")
plt.imshow(original_image, interpolation="none", cmap=plt_cmap)
plt.imshow(mask, cmap="jet", interpolation="none", alpha=0.7)
index += 1
plt_index += 1
plt.show()
def plot_prediction(
    predictions, test_images, input_shape: Vector, threshold: float = 0.4
):
    """Plot, per sample, a 3-column row: original image, raw prediction, and
    the prediction (masked below `threshold`) overlaid on the original."""
    plt.figure(figsize=(20, 10))
    n_rows = len(predictions)
    # grayscale images are reshaped without the trailing channel axis
    if input_shape[2] > 1:
        target_shape = (input_shape[0], input_shape[1], input_shape[2])
    else:
        target_shape = (input_shape[0], input_shape[1])
    cmap_name = "gray"
    cell = 1
    for sample_idx, test_image in enumerate(test_images):
        original = test_image.reshape(target_shape)
        predicted = predictions[sample_idx].reshape(target_shape)
        # hide everything under the threshold so only confident pixels overlay
        hot_mask = ma.masked_where(predicted < threshold, predicted)
        plt.subplot(n_rows, 3, cell)
        plt.title("Original")
        plt.imshow(original, interpolation="none", cmap=cmap_name)
        cell += 1
        plt.subplot(n_rows, 3, cell)
        plt.title("Prediction")
        plt.imshow(predicted, interpolation="none", cmap=cmap_name)
        cell += 1
        plt.subplot(n_rows, 3, cell)
        plt.title("Overlay")
        plt.imshow(original, interpolation="none", cmap=cmap_name)
        plt.imshow(hot_mask, cmap="jet", interpolation="none", alpha=0.7)
        cell += 1
    plt.show()
def plot_classification(predictions, test_images, input_shape: Vector, classes: [], threshold: float = 0.4):
    """Show each test image titled with its top predicted class.

    The arg-max class probability is compared against ``threshold``;
    below it the image is labelled "Unknown" instead of a class name.
    Multi-channel images are reshaped to (H, W, C), otherwise (H, W).
    """
    plt.figure(figsize=(20, 10))
    pred_count = len(predictions)
    if input_shape[2] > 1:
        plot_shape = (input_shape[0], input_shape[1], input_shape[2])
    else:
        plot_shape = (input_shape[0], input_shape[1])
    cmap = "gray"
    for row, test_image in enumerate(test_images):
        image = test_image.reshape(plot_shape)
        pred = predictions[row]
        c_idx = np.argmax(pred)
        plt.subplot(pred_count, 1, row + 1)
        value = pred[c_idx]
        label = classes[c_idx] if value >= threshold else "Unknown"
        plt.title("{0} ({1})".format(label, value))
        plt.imshow(image, interpolation="none", cmap=cmap)
    plt.show()
| 31.851852 | 108 | 0.608837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 293 | 0.06814 |
63d33b671fe35061f0300b274a32b66e28d0a7ac | 80 | py | Python | pypi_starter/__init__.py | wuhaifengdhu/pypi-starter | 3ccb80dd9490f9d65b986350d82f9a20743af17f | [
"Apache-2.0"
] | null | null | null | pypi_starter/__init__.py | wuhaifengdhu/pypi-starter | 3ccb80dd9490f9d65b986350d82f9a20743af17f | [
"Apache-2.0"
] | null | null | null | pypi_starter/__init__.py | wuhaifengdhu/pypi-starter | 3ccb80dd9490f9d65b986350d82f9a20743af17f | [
"Apache-2.0"
] | null | null | null | """
Your application
"""
from submodule.main import *
from main import *
| 11.428571 | 29 | 0.65 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.325 |
63d512d7adf908df92c2f691160d882344e7b9d4 | 4,190 | py | Python | pocx/funcs/fofa.py | antx-code/pocx | c11d3a2bed598f21a32c29ac2b27e098e9dede96 | [
"MIT"
] | 9 | 2022-03-11T01:55:23.000Z | 2022-03-26T03:10:53.000Z | pocx/funcs/fofa.py | antx-code/pocx | c11d3a2bed598f21a32c29ac2b27e098e9dede96 | [
"MIT"
] | null | null | null | pocx/funcs/fofa.py | antx-code/pocx | c11d3a2bed598f21a32c29ac2b27e098e9dede96 | [
"MIT"
] | 1 | 2022-03-26T03:10:56.000Z | 2022-03-26T03:10:56.000Z | import httpx
import json
import base64
from loguru import logger
import urllib3
import ssl
# Build one TLS context at import time. Prefer httpx's own context; when it
# cannot be created, fall back to the stdlib default with TLS 1.0 re-enabled.
try:
    ssl_context = httpx.create_ssl_context()
except Exception:  # FIX: was a bare `except:`, which also swallows
    # SystemExit/KeyboardInterrupt; Exception is the widest safe net here.
    ssl_context = ssl.create_default_context()
    ssl_context.options ^= ssl.OP_NO_TLSv1  # Enable TLS 1.0 back
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Relax httpx's default cipher list so legacy servers still negotiate.
httpx._config.DEFAULT_CIPHERS += ":ALL:@SECLEVEL=1"
class Fofa():
    """Minimal client for the Fofa search API."""

    @logger.catch(level='ERROR')
    def __init__(self):
        """
        Initialize the Fofa API with empty credentials;
        call set_config() before searching.
        """
        self.key = ''
        self.email = ''
        self.domain = ''

    @logger.catch(level='ERROR')
    def set_config(self, api_key: str, api_email: str, domain: str = 'fofa.info'):
        """
        Setting the configuration for the Fofa API.

        :param api_key: the fofa api key
        :param api_email: the fofa account email
        :param domain: the fofa domain
        :return:
        """
        self.key = api_key
        self.email = api_email
        self.domain = domain

    @logger.catch(level='ERROR')
    def _grammar_b64(self, grammar: str):
        """
        Transform the grammar to base64, URL-encoding the '=' padding.

        :param grammar: the fofa search grammar
        :return: the base64 grammar, safe for use in a query string
        """
        b64 = base64.b64encode(grammar.encode()).decode()
        # BUG FIX: the old loop called b64.replace() and discarded the
        # result (str.replace returns a new string; strings are immutable),
        # so '=' padding was never actually encoded.
        return b64.replace('=', '%3D')

    @logger.catch(level='ERROR')
    def _search(self, grammar: str, page: int = 1, size: int = 100):
        """
        The core method for searching the fofa.

        :param grammar: the search grammar
        :param page: the page to search
        :param size: the size of the page
        :return: the parsed search results dict, or None on API error
        """
        b64 = self._grammar_b64(grammar)
        furl = f'https://{self.domain}/api/v1/search/all?email={self.email}&key={self.key}&qbase64={b64}&{grammar}&page={page}&size={size}'
        try:
            assets = httpx.get(furl).content.decode('utf-8')
        except Exception as _:
            # retry with the relaxed module-level TLS context (TLS 1.0 allowed)
            assets = httpx.get(furl, verify=ssl_context).content.decode('utf-8')
        result = json.loads(assets)
        if not result['error']:
            return result
        logger.error(f'Fofa API error: {result["errmsg"]}')
        return None

    @logger.catch(level='ERROR')
    def assets(self, grammar: str, page: int = 1, size: int = 100):
        """
        Gain the assets from the fofa.

        :param grammar: the search grammar
        :param page: the page to search
        :param size: the size of the page
        :return: a deduplicated list of 'http(s)://host:port' targets
        """
        results = self._search(grammar, page, size)
        targets = []
        if not results:
            return targets
        for asset in results['results']:
            # port 443 is assumed to speak https; everything else http
            target = f'https://{asset[1]}:{asset[2]}' if int(asset[2]) == 443 else f'http://{asset[1]}:{asset[2]}'
            targets.append(target)
        return list(set(targets))

    @logger.catch(level='ERROR')
    def asset_counts(self, grammar: str):
        """
        Get the asset counts from the fofa, which search the given grammar.

        :param grammar: the search grammar
        :return: the asset counts (0 when the search failed)
        """
        results = self._search(grammar, 1, 1)
        if not results:
            return 0
        return results['size']

    @logger.catch(level='ERROR')
    def asset_pages(self, grammar: str, size: int = 100):
        """
        Get the asset pages from the fofa, which search the given grammar.

        :param grammar: the search grammar
        :param size: the size of the page
        :return: the number of result pages (capped at 10000 assets)
        """
        results = self._search(grammar, 1, 1)
        if not results:
            return 1
        all_counts = results['size']
        if all_counts >= 10000:
            # BUG FIX: this message was missing the f-prefix, so the
            # placeholder text was logged literally instead of the count.
            logger.warning(f"Fofa's asset counts is {all_counts}, which is too much, so we only search the first 10000.")
            count = 10000 % size
            pages = 10000 // size if count == 0 else 10000 // size + 1
        else:
            count = results['size'] % size
            pages = all_counts // size if count == 0 else all_counts // size + 1
        return pages
| 30.362319 | 139 | 0.582816 | 3,807 | 0.908592 | 0 | 0 | 3,753 | 0.895704 | 0 | 0 | 1,721 | 0.41074 |
63d54e098e0531437161c2fb674cb796cbee3e48 | 4,783 | py | Python | check_ec2_events.py | chartbeat/check_ec2_events | 3c72752dcb2908ec8c7c4eaf7e5788c6f88ec5aa | [
"Apache-2.0"
] | 2 | 2015-11-05T11:38:09.000Z | 2019-04-21T12:22:30.000Z | check_ec2_events.py | chartbeat/check_ec2_events | 3c72752dcb2908ec8c7c4eaf7e5788c6f88ec5aa | [
"Apache-2.0"
] | null | null | null | check_ec2_events.py | chartbeat/check_ec2_events | 3c72752dcb2908ec8c7c4eaf7e5788c6f88ec5aa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Author: Justin Lintz
# Copyright 2013 Chartbeat
# http://www.chartbeat.com
#
# Nagios check to alert on any retiring instances or
# instances that need rebooting
#
import getopt
import sys
import re
from datetime import datetime
from datetime import timedelta
from boto.ec2 import connect_to_region
from boto.exception import EC2ResponseError
# Setup IAM User with read-only EC2 access
KEY_ID = ""
ACCESS_KEY = ""
REGION = "us-east-1"
OK = 0
WARNING = 1
CRITICAL = 2
UNKNOWN = 3
def get_instances(instance_ids):
"""
Return an Instance objects for the given instance ids
@param instance_ids: Instance ids (list)
@return: Instance objects (dict)
"""
instances = dict()
conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)
try:
reservations = conn.get_all_instances(instance_ids)
except EC2ResponseError, ex:
print 'Got exception when calling EC2 for instances (%s): %s' % \
(", ".join(instance_ids), ex.error_message)
return instances
for r in reservations:
if len(r.instances) and r.instances[0].id in instance_ids:
instances[r.instances[0].id] = r.instances[0].tags["Name"]
return instances
class AmazonEventCheck(object):
"""
Nagios check for the Amazon events.
Will warn/error if any pending events based on time till event occurs
"""
def __init__(self):
pass
def _get_instances_pending_events(self):
"""
Get list of instances that have pending events.
@return: List(Instance, String , Datetime), List of (Instance, instance
Event, Scheduled Date) for hosts with pending events
"""
conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)
stats = conn.get_all_instance_status()
next_token = stats.next_token
while next_token != None:
next_stats = conn.get_all_instance_status(next_token=next_token)
stats.extend(next_stats)
next_token = next_stats.next_token
ret = []
for stat in stats:
if stat.events:
for event in stat.events:
if re.match('^\[Completed\]', event.description):
continue
ret.append([stat.id, event.code, event.not_before])
if len(ret) > 0:
instances = get_instances([stat[0] for stat in ret])
for stat in ret:
stat.insert(1, instances[stat[0]])
return ret
def check(self, critical_threshold):
"""
Check pending instance events, alert if
event time is less than critical_threshold
Warn otherwise
@param critical_threshold: int, number of days before an event that nagios should alert
"""
events = self._get_instances_pending_events()
if not events:
print 'OK: no pending events'
return OK
critical_events = []
warning_events = []
for event in events:
event_time = datetime.strptime(event[3], '%Y-%m-%dT%H:%M:%S.000Z')
# Are we close enough to the instance event that we should alert?
if datetime.utcnow() > (event_time - timedelta(days=critical_threshold)):
critical_events.append(event)
else:
warning_events.append(event)
if critical_events:
print 'CRITICAL: instances with events in %d days - %s' % (critical_threshold, ", ".join(["%s(%s)" % (event[0], event[1]) for event in critical_events]))
return CRITICAL
print 'WARNING: instances with scheduled events %s' % (", ".join(["%s(%s)" % (event[0], event[1]) for event in warning_events]))
return WARNING
def usage():
    """Write the command-line usage summary to stderr."""
    sys.stderr.write(
        'Usage: %s [-h|--help] [-A <aws_access_key_id>] '
        '[-S <aws_secret_access_key>] [-R <region>] [-c <day>]\n'
        % sys.argv[0])
def main():
    """Parse command-line options, configure credentials, run the check.

    Returns a Nagios status code (OK/WARNING/CRITICAL/UNKNOWN).
    """
    try:
        opts, dummy_args = getopt.getopt(sys.argv[1:], "hA:S:R:c:", ["help"])
    except getopt.GetoptError:
        usage()
        return UNKNOWN

    global KEY_ID, ACCESS_KEY, REGION
    critical_threshold = 2
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            return UNKNOWN
        elif opt in ("-A"):
            KEY_ID = arg
        elif opt in ("-S"):
            ACCESS_KEY = arg
        elif opt in ("-R"):
            REGION = arg
        elif opt in ("-c"):
            critical_threshold = int(arg)

    # both credentials are mandatory
    if KEY_ID == "" or ACCESS_KEY == "":
        usage()
        return UNKNOWN

    eventcheck = AmazonEventCheck()
    return eventcheck.check(critical_threshold)
# Entry point: the process exit status doubles as the Nagios return code.
if __name__ == '__main__':
    sys.exit(main())
| 30.081761 | 165 | 0.605896 | 2,588 | 0.541083 | 0 | 0 | 0 | 0 | 0 | 0 | 1,443 | 0.301693 |
63d5eb9388610a04ecb9974176a0d336a0098ead | 7,917 | py | Python | chc/api/CFunctionCandidateContract.py | Databean/CodeHawk-C | 98720753beb51e0bf5105f8f6838618292fbf55c | [
"MIT"
] | 10 | 2020-08-17T15:35:55.000Z | 2022-03-23T14:39:57.000Z | chc/api/CFunctionCandidateContract.py | kestreltechnology/CodeHawk-C | db0fa92fa630cd919f29021d464533f0e7170fed | [
"MIT"
] | 31 | 2020-07-17T05:45:43.000Z | 2021-05-29T04:49:49.000Z | chc/api/CFunctionCandidateContract.py | kestreltechnology/CodeHawk-C | db0fa92fa630cd919f29021d464533f0e7170fed | [
"MIT"
] | 3 | 2020-06-13T05:32:34.000Z | 2021-09-16T02:31:39.000Z | # ------------------------------------------------------------------------------
# CodeHawk C Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2017-2020 Kestrel Technology LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
import xml.etree.ElementTree as ET
class CFunctionCandidateContract(object):
    """Represents a function contract to collect analysis-produced requests."""

    def __init__(self, cfilecontracts, xnode):
        self.cfilecontracts = cfilecontracts
        self.ixd = self.cfilecontracts.cfile.interfacedictionary
        self.prd = self.cfilecontracts.cfile.predicatedictionary
        self.xnode = xnode
        self.name = self.xnode.get("name")
        self.cfun = self.cfilecontracts.cfile.get_function_by_name(self.name)
        self.api = self.cfun.api
        self.signature = {}  # name -> index nr
        self.rsignature = {}  # index nr -> name
        self.postrequests = {}  # index -> XPredicate
        self.postguarantees = {}  # index -> XPredicate
        # FIX: these two dicts were read and written throughout the class
        # (collect_post, add_postrequest, _initialize_postconditions,
        # add_datastructure_request, ...) but never initialized, which
        # raised AttributeError as soon as _initialize() parsed a
        # postcondition or data-structure request.
        self.postconditions = {}  # index -> XPredicate (status "use")
        self.datastructurerequests = {}  # (ckey, pred index) -> XPredicate
        self._initialize(self.xnode)

    def collect_post(self):
        """collect advertised post condition from this function's api"""
        guarantees = self.api.get_postcondition_guarantees()
        for g in guarantees:
            if g.index in self.postconditions:
                continue
            self.postguarantees[g.index] = g

    def add_postrequest(self, pc):
        """add post request from caller's function api"""
        if pc.index in self.postconditions:
            return
        self.postrequests[pc.index] = pc

    def add_datastructure_request(self, ckey, predicate):
        """add data structure request from caller's function spo"""
        pc = self.convert_to_post(ckey, predicate)
        if not ((ckey, pc.index) in self.datastructurerequests):
            self.datastructurerequests[(ckey, pc.index)] = pc

    def convert_to_post(self, ckey, p):
        # NOTE(review): returns None for any predicate that is not an
        # "initialized" field access without sub-offset; callers appear to
        # rely on only being handed convertible predicates — verify.
        if p.is_initialized():
            lval = p.get_lval()
            offset = lval.get_offset()
            if offset.is_field() and not offset.get_offset().has_offset():
                fieldname = offset.get_fieldname()
                fieldterm = self.ixd.mk_field_s_term(fieldname)
                return self.ixd.mk_initialized_xpredicate(fieldterm)

    def write_mathml_parameters(self, cnode):
        """Append a <par name=... nr=...> element per signature entry."""
        for par in self.signature:
            pnode = ET.Element("par")
            pnode.set("name", par)
            pnode.set("nr", str(self.signature[par]))
            cnode.append(pnode)

    def write_mathml_postrequests(self, cnode):
        """Append <post status="request"> elements for each post request."""
        for pr in self.postrequests.values():
            prnode = ET.Element("post")
            prnode.set("status", "request")
            prmnode = ET.Element("math")
            pr.write_mathml(prmnode, self.rsignature)
            cnode.append(prnode)
            prnode.append(prmnode)

    def write_mathml_postguarantees(self, cnode):
        """Append <post status="guarantee"> elements for each guarantee."""
        for p in self.postguarantees.values():
            pnode = ET.Element("post")
            pnode.set("status", "guarantee")
            pmnode = ET.Element("math")
            p.write_mathml(pmnode, self.rsignature)
            cnode.append(pnode)
            pnode.append(pmnode)

    def write_mathml_postconditions(self, cnode):
        """Append <post status="use"> elements for each used postcondition."""
        for p in self.postconditions.values():
            pnode = ET.Element("post")
            pnode.set("status", "use")
            pmnode = ET.Element("math")
            p.write_mathml(pmnode, self.rsignature)
            cnode.append(pnode)
            pnode.append(pmnode)

    def write_mathml_datastructurerequests(self, cnode):
        """Append one <ds-request> element per recorded (ckey, predid)."""
        for (ckey, predid) in self.datastructurerequests:
            pred = self.ixd.get_xpredicate(predid)
            structname = self.cfilecontracts.cfile.declarations.get_structname(ckey)
            dnode = ET.Element("ds-request")
            dnode.set("ckey", str(ckey))
            dnode.set("predid", str(predid))
            dnode.set("predicate", str(pred))
            dnode.set("structname", structname)
            mnode = ET.Element("math")
            pred.write_mathml(mnode, self.rsignature)
            dnode.append(mnode)
            cnode.append(dnode)

    def write_mathml(self, fnode):
        """Serialize the whole contract under fnode."""
        parsnode = ET.Element("parameters")
        ppnode = ET.Element("postconditions")
        ssnode = ET.Element("sideeffects")
        ddnode = ET.Element("data-structure-requests")
        self.write_mathml_parameters(parsnode)
        self.write_mathml_postrequests(ppnode)
        self.write_mathml_postconditions(ppnode)
        self.write_mathml_postguarantees(ppnode)
        self.write_mathml_datastructurerequests(ddnode)
        fnode.extend([parsnode, ppnode, ssnode, ddnode])

    def _initialize_signature(self, ppnode):
        if ppnode is None:
            print("Problem with kta function contract signature: " + self.name)
            return
        for pnode in ppnode.findall("par"):
            self.signature[pnode.get("name")] = int(pnode.get("nr"))
            self.rsignature[int(pnode.get("nr"))] = pnode.get("name")

    def _initialize_postconditions(self, pcsnode):
        # route each parsed postcondition by its status attribute
        for pcnode in pcsnode.findall("post"):
            ipc = self.ixd.parse_mathml_xpredicate(pcnode, self.signature)
            pc = self.ixd.get_xpredicate(ipc)
            status = pcnode.get("status", "use")
            if status == "request":
                self.postrequests[ipc] = pc
            elif status == "guarantee":
                self.postguarantees[ipc] = pc
            else:
                self.postconditions[ipc] = pc

    def _initialize_datastructure_requests(self, ddnode):
        for rnode in ddnode.findall("ds-request"):
            ckey = int(rnode.get("ckey"))
            predid = int(rnode.get("predid"))
            self.datastructurerequests[(ckey, predid)] = self.ixd.get_xpredicate(predid)

    def _initialize_frame_conditions(self, fnode):
        # frame conditions are not supported yet; placeholder
        pass

    def _initialize(self, xnode):
        self._initialize_signature(xnode.find("parameters"))
        self._initialize_postconditions(xnode.find("postconditions"))
        self._initialize_datastructure_requests(xnode.find("data-structure-requests"))
        self._initialize_frame_conditions(xnode.find("frame-conditions"))

    def __str__(self):
        lines = []
        lines.append("Contract for " + self.name)
        lines.append("-" * 80)

        def add(t, pl):
            if len(pl) > 0:
                lines.append(t)
                for p in pl:
                    lines.append("  " + str(p))

        add("Postconditions used", self.postconditions.values())
        add("Postconditions guaranteed", self.postguarantees.values())
        add("Postconditions requested", self.postrequests.values())
        return "\n".join(lines)
| 42.336898 | 88 | 0.619679 | 6,459 | 0.815839 | 0 | 0 | 0 | 0 | 0 | 0 | 2,256 | 0.284956 |
63d6bfe4aff3255cd21a08c6e08c2856786259e9 | 1,496 | py | Python | amplify/ext/phpfpm/util/version.py | dp92987/nginx-amplify-agent | 1b2eed6eab52a82f35974928d75044451b4bedaf | [
"BSD-2-Clause"
] | 308 | 2015-11-17T13:15:33.000Z | 2022-03-24T12:03:40.000Z | amplify/ext/phpfpm/util/version.py | dp92987/nginx-amplify-agent | 1b2eed6eab52a82f35974928d75044451b4bedaf | [
"BSD-2-Clause"
] | 211 | 2015-11-16T15:27:41.000Z | 2022-03-28T16:20:15.000Z | amplify/ext/phpfpm/util/version.py | dp92987/nginx-amplify-agent | 1b2eed6eab52a82f35974928d75044451b4bedaf | [
"BSD-2-Clause"
] | 80 | 2015-11-16T18:20:30.000Z | 2022-03-02T12:47:56.000Z | # -*- coding: utf-8 -*-
from amplify.agent.common.context import context
from amplify.agent.common.util import subp
__author__ = "Grant Hulegaard"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "Grant Hulegaard"
__email__ = "grant.hulegaard@nginx.com"
VERSION_CMD = "%s --version"
def VERSION_PARSER(bin_path):
    """Run `<bin_path> --version` and parse the reported version.

    Returns a (version, raw_first_line) tuple, e.g. ('5.5.9-1',
    'PHP 5.5.9-1ubuntu4.17 (fpm-fcgi) (built: May 19 2016 19:08:26)'),
    or implicitly None when the command could not be run.
    """
    try:
        raw_stdout, _ = subp.call(VERSION_CMD % bin_path)
    except Exception as e:
        exc_name = e.__class__.__name__
        # this is being logged as debug only since we will rely on bin_path
        # collection error to tip off support as to what is going wrong with
        # version detection
        context.log.debug(
            'failed to get version info from "%s" due to %s' %
            (bin_path, exc_name)
        )
        context.log.debug('additional info:', exc_info=True)
    else:
        # only the first line matters, e.g.
        # PHP 5.5.9-1ubuntu4.17 (fpm-fcgi) (built: May 19 2016 19:08:26)
        raw_line = raw_stdout[0]
        raw_version = raw_line.split()[1]  # 5.5.9-1ubuntu4.17
        # keep the leading run of digits, dots and dashes ('5.5.9-1')
        version_chars = []
        for char in raw_version:
            if not (char.isdigit() or char in ('.', '-')):
                break
            version_chars.append(char)
        return ''.join(version_chars), raw_line
| 31.829787 | 76 | 0.590241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 633 | 0.423128 |
891d4ed726e77fd986edee372d297804eaad447d | 385 | py | Python | protera_stability/engine/__init__.py | stepp1/protera-stability | 62f70af00b9475a0b0aeba39fa6ae57f0bb25b34 | [
"MIT"
] | 1 | 2021-11-05T02:14:31.000Z | 2021-11-05T02:14:31.000Z | protera_stability/engine/__init__.py | stepp1/protera-stability | 62f70af00b9475a0b0aeba39fa6ae57f0bb25b34 | [
"MIT"
] | null | null | null | protera_stability/engine/__init__.py | stepp1/protera-stability | 62f70af00b9475a0b0aeba39fa6ae57f0bb25b34 | [
"MIT"
] | null | null | null | from protera_stability.engine.default import get_cfg, setup_train, DefaultTrainer
from protera_stability.engine.lightning_train import (
default_cbs,
DataModule,
LitProteins,
TrainingPl,
)
# Public names re-exported by this subpackage.
__all__ = [
    "DataModule",
    "DefaultTrainer",
    "LitProteins",
    "TrainingPl",
    "default_cbs",
    "get_cfg",
    "setup_train",
]
# Import-time invariant: keep __all__ alphabetically sorted.
# (Note: assert statements are skipped when Python runs with -O.)
assert __all__ == sorted(__all__)
| 19.25 | 81 | 0.698701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.228571 |
891d65158f17bd525585b5367fe4ef83f22f5f0b | 416 | py | Python | 1122.py | wilbertgeng/LeetCode_exercise | f00c08e0d28ffa88d61d4262c6d1f49f1fa91ebc | [
"MIT"
] | null | null | null | 1122.py | wilbertgeng/LeetCode_exercise | f00c08e0d28ffa88d61d4262c6d1f49f1fa91ebc | [
"MIT"
] | null | null | null | 1122.py | wilbertgeng/LeetCode_exercise | f00c08e0d28ffa88d61d4262c6d1f49f1fa91ebc | [
"MIT"
] | null | null | null | """1122. Relative Sort Array"""
class Solution(object):
    def relativeSortArray(self, arr1, arr2):
        """
        :type arr1: List[int]
        :type arr2: List[int]
        :rtype: List[int]

        Sort arr1 so that values appearing in arr2 keep arr2's relative
        order; values absent from arr2 go last, in ascending order.
        """
        # rank of each distinguished value, in arr2 order
        pos = {num: i for i, num in enumerate(arr2)}
        # Values absent from arr2 get a key of len(arr2) + x: always larger
        # than any rank (ranks are < len(arr2)) and ascending in x.
        # FIX: the previous hard-coded 1000 broke when len(arr2) exceeded
        # 1000 + x; the unreachable duplicate return statement was removed.
        return sorted(arr1, key=lambda x: pos.get(x, len(arr2) + x))
| 26 | 62 | 0.526442 | 382 | 0.918269 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.336538 |
891f7d296bd8553fa96b43de039756b437874319 | 1,037 | py | Python | src/merge_sort.py | Gsllchb/4interview-python | efec766f276039cb3766f5068c7914f31ac74aca | [
"MIT"
] | null | null | null | src/merge_sort.py | Gsllchb/4interview-python | efec766f276039cb3766f5068c7914f31ac74aca | [
"MIT"
] | null | null | null | src/merge_sort.py | Gsllchb/4interview-python | efec766f276039cb3766f5068c7914f31ac74aca | [
"MIT"
] | null | null | null | import itertools
_THRESHOLD = 64
def merge_sort(arr, lo, hi):
    """Sort arr[lo:hi] in place with recursive top-down merge sort.

    Ranges of at most _THRESHOLD elements are finished off by insertion
    sort, which beats the recursion overhead on small inputs.
    """
    span = hi - lo
    if span <= _THRESHOLD:
        _insert_sort(arr, lo, hi)
    else:
        mid = lo + span // 2
        merge_sort(arr, lo, mid)
        merge_sort(arr, mid, hi)
        _merge(arr, lo, mid, hi)
def _insert_sort(arr, lo, hi):
for i in range(lo, hi):
for j in range(i, lo, -1):
if arr[j] < arr[j - 1]:
arr[j], arr[j - 1] = arr[j - 1], arr[j]
else:
break
def _merge(arr, lo, mi, hi):
res = []
index1 = lo
index2 = mi
while index1 < mi and index2 < hi:
if arr[index1] <= arr[index2]:
res.append(arr[index1])
index1 += 1
else:
res.append(arr[index2])
index2 += 1
res += itertools.islice(arr, index1, mi)
res += itertools.islice(arr, index2, hi)
arr[lo: hi] = res
def _test():
    """Ad-hoc smoke test: sort a small sample list and print the result."""
    sample = [1, 0, -1, -2, -3, -2, 1, 1, 2, 1, 10, 0]
    merge_sort(sample, 0, len(sample))
    print(sample)
# Run the smoke test only when executed directly, not on import.
if __name__ == '__main__':
    _test()
| 21.163265 | 55 | 0.493732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.009643 |
891fe67e35e33d36e5cc37b7ff0cf9fe1518755c | 59,661 | py | Python | code/client/munkilib/adobeutils.py | jlrgraham/munki | 9e0deb754083c671f8005f05e7ec44fbe001b790 | [
"Apache-2.0"
] | null | null | null | code/client/munkilib/adobeutils.py | jlrgraham/munki | 9e0deb754083c671f8005f05e7ec44fbe001b790 | [
"Apache-2.0"
] | null | null | null | code/client/munkilib/adobeutils.py | jlrgraham/munki | 9e0deb754083c671f8005f05e7ec44fbe001b790 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# encoding: utf-8
"""
adobeutils.py
Utilities to enable munki to install/uninstall Adobe CS3/CS4/CS5 products
using the CS3/CS4/CS5 Deployment Toolkits.
"""
# Copyright 2009-2014 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#import sys
import os
import re
import subprocess
import time
import tempfile
import sqlite3
from xml.dom import minidom
from glob import glob
import FoundationPlist
import munkicommon
import munkistatus
import utils
# we use lots of camelCase-style names. Deal with it.
# pylint: disable=C0103
class AdobeInstallProgressMonitor(object):
    """A class to monitor installs/removals of Adobe products.
    Finds the currently active installation log and scrapes data out of it.
    Installations that install a product and updates may actually create
    multiple logs."""

    def __init__(self, kind='CS5', operation='install'):
        '''Provide some hints as to what type of installer is running and
        whether we are installing or removing'''
        self.kind = kind
        self.operation = operation
        # maps logfile path -> highest number of completed payloads seen
        self.payload_count = {}

    def get_current_log(self):
        '''Returns the current Adobe install log, or None if there is no
        log file in the Adobe Installers log directory.'''
        logpath = '/Library/Logs/Adobe/Installers'
        # find the most recently-modified log file
        proc = subprocess.Popen(['/bin/ls', '-t1', logpath],
                                bufsize=-1, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        (output, dummy_err) = proc.communicate()
        if output:
            firstitem = str(output).splitlines()[0]
            if firstitem.endswith(".log"):
                # return path of most recently modified log file
                return os.path.join(logpath, firstitem)
        return None

    def info(self):
        '''Returns the number of completed Adobe payloads,
        and the AdobeCode of the most recently completed payload.'''
        last_adobecode = ""

        logfile = self.get_current_log()
        if logfile:
            # the marker for a completed payload differs by installer
            # generation and by install vs. removal
            if self.kind in ['CS6', 'CS5']:
                regex = r'END TIMER :: \[Payload Operation :\{'
            elif self.kind in ['CS3', 'CS4']:
                if self.operation == 'install':
                    regex = r'Closed PCD cache session payload with ID'
                else:
                    regex = r'Closed CAPS session for removal of payload'
            else:
                if self.operation == 'install':
                    regex = r'Completing installation for payload at '
                else:
                    regex = r'Physical payload uninstall result '
            cmd = ['/usr/bin/grep', '-E', regex, logfile]
            proc = subprocess.Popen(cmd, bufsize=-1,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            (output, dummy_err) = proc.communicate()
            if output:
                lines = str(output).splitlines()
                completed_payloads = len(lines)
                if (not logfile in self.payload_count
                        or completed_payloads > self.payload_count[logfile]):
                    # record number of completed payloads
                    self.payload_count[logfile] = completed_payloads
                    # now try to get the AdobeCode of the most recently
                    # completed payload.
                    # this isn't 100% accurate, but it's mostly for show
                    # anyway...
                    regex = re.compile(r'[^{]*(\{[A-Fa-f0-9-]+\})')
                    # FIX: scan from the newest line backwards with an
                    # explicit None check instead of mutating the list and
                    # catching AttributeError/IndexError from a failed match
                    for line in reversed(lines):
                        m = regex.match(line)
                        if m:
                            last_adobecode = m.group(1)
                            break

        # sum completions across all logs seen so far
        total_completed_payloads = sum(self.payload_count.values())
        return (total_completed_payloads, last_adobecode)
# dmg helper
# we need this instead of the one in munkicommon because the Adobe stuff
# needs the dmgs mounted under /Volumes. We can merge this later (or not).
def mountAdobeDmg(dmgpath):
    """
    Attempts to mount the dmg at dmgpath
    and returns a list of mountpoints
    """
    dmgname = os.path.basename(dmgpath)
    cmd = ['/usr/bin/hdiutil', 'attach', dmgpath,
           '-nobrowse', '-noverify', '-plist']
    proc = subprocess.Popen(cmd, bufsize=-1,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    pliststr, err = proc.communicate()
    if err:
        munkicommon.display_error('Error %s mounting %s.' % (err, dmgname))
    if not pliststr:
        return []
    # hdiutil's -plist output lists each attached entity; collect the
    # ones that actually got a mount point
    plist = FoundationPlist.readPlistFromString(pliststr)
    return [entity['mount-point']
            for entity in plist['system-entities']
            if 'mount-point' in entity]
def getCS5uninstallXML(optionXMLfile):
    '''Gets the uninstall deployment data from a CS5 installer'''
    xml = ''
    dom = minidom.parse(optionXMLfile)
    for info_item in dom.getElementsByTagName('DeploymentInfo'):
        uninstall_elems = info_item.getElementsByTagName('DeploymentUninstall')
        if not uninstall_elems:
            continue
        # only the first Deployment under the first DeploymentUninstall
        # of each DeploymentInfo is serialized
        deployments = uninstall_elems[0].getElementsByTagName('Deployment')
        if deployments:
            xml += deployments[0].toxml('UTF-8')
    return xml
def getCS5mediaSignature(dirpath):
    '''Returns the CS5 mediaSignature for an AAMEE CS5 install.
    dirpath is typically the root of a mounted dmg'''
    payloads_dir = ""
    # look for a payloads folder; the last one found during the walk wins
    for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
        if path.endswith('/payloads'):
            payloads_dir = path

    # return empty-handed if we didn't find a payloads folder
    if not payloads_dir:
        return ""

    # now look for Setup.xml and pull out the mediaSignature element text
    setupxml = os.path.join(payloads_dir, 'Setup.xml')
    if not (os.path.exists(setupxml) and os.path.isfile(setupxml)):
        return ""
    dom = minidom.parse(setupxml)
    setup_elems = dom.getElementsByTagName('Setup')
    if not setup_elems:
        return ""
    sig_elems = setup_elems[0].getElementsByTagName('mediaSignature')
    if not sig_elems:
        return ""
    return ''.join(node.nodeValue for node in sig_elems[0].childNodes)
def getPayloadInfo(dirpath):
    '''Parses Adobe payloads, pulling out info useful to munki.
    .proxy.xml files are used if available, or for CC-era updates
    which do not contain one, the Media_db.db file, which contains
    identical XML, is instead used.

    CS3/CS4: contain only .proxy.xml
    CS5/CS5.5/CS6: contain both
    CC: contain only Media_db.db

    Returns a dict that may contain AdobeCode, display_name, version and
    installed_size (in Kbytes); returns an empty dict when no payload
    info can be found.'''
    payloadinfo = {}
    dom = None
    # look for .proxy.xml file dir
    if os.path.isdir(dirpath):
        proxy_paths = glob(os.path.join(dirpath, '*.proxy.xml'))
        if proxy_paths:
            xmlpath = proxy_paths[0]
            dom = minidom.parse(xmlpath)
        # if there's no .proxy.xml we should hope there's a Media_db.db
        else:
            db_path = os.path.join(dirpath, 'Media_db.db')
            if os.path.exists(db_path):
                conn = sqlite3.connect(db_path)
                cur = conn.cursor()
                cur.execute("SELECT value FROM PayloadData WHERE "
                            "PayloadData.key = 'PayloadInfo'")
                result = cur.fetchone()
                cur.close()
                if result:
                    info_xml = result[0].encode('UTF-8')
                    dom = minidom.parseString(info_xml)
    if dom is None:
        # FIX: not a directory, no proxy.xml, or the Media_db.db query
        # returned nothing. Previously these paths fell through and raised
        # NameError on the unbound 'dom' below.
        return payloadinfo

    payload_info = dom.getElementsByTagName('PayloadInfo')
    if payload_info:
        installer_properties = payload_info[0].getElementsByTagName(
            'InstallerProperties')
        if installer_properties:
            properties = installer_properties[0].getElementsByTagName(
                'Property')
            for prop in properties:
                if 'name' in prop.attributes.keys():
                    propname = prop.attributes['name'].value.encode('UTF-8')
                    propvalue = ''
                    for node in prop.childNodes:
                        propvalue += node.nodeValue
                    if propname == 'AdobeCode':
                        payloadinfo['AdobeCode'] = propvalue
                    if propname == 'ProductName':
                        payloadinfo['display_name'] = propvalue
                    if propname == 'ProductVersion':
                        payloadinfo['version'] = propvalue

        installmetadata = payload_info[0].getElementsByTagName(
            'InstallDestinationMetadata')
        if installmetadata:
            totalsizes = installmetadata[0].getElementsByTagName(
                'TotalSize')
            if totalsizes:
                installsize = ''
                for node in totalsizes[0].childNodes:
                    installsize += node.nodeValue
                # TotalSize is in bytes; munki wants Kbytes
                payloadinfo['installed_size'] = int(installsize)/1024

    return payloadinfo
def getAdobeSetupInfo(installroot):
    '''Given the root of mounted Adobe DMG,
    look for info about the installer or updater.

    Returns a dict that may contain display_name, version,
    AdobeSetupType, payloads (list of per-payload info dicts) and
    installed_size (sum of payload sizes, Kbytes).'''
    info = {}
    payloads = []

    # look for all the payloads folders
    for (path, dummy_dirs, dummy_files) in os.walk(installroot):
        if path.endswith('/payloads'):
            driverfolder = ''
            mediaSignature = ''
            setupxml = os.path.join(path, 'setup.xml')
            if os.path.exists(setupxml):
                dom = minidom.parse(setupxml)
                # a Driver element points at the payload folder for the
                # "main" product of this installer
                drivers = dom.getElementsByTagName('Driver')
                if drivers:
                    driver = drivers[0]
                    if 'folder' in driver.attributes.keys():
                        driverfolder = driver.attributes[
                            'folder'].value.encode('UTF-8')
                if driverfolder == '':
                    # look for mediaSignature (CS5 AAMEE install)
                    setupElements = dom.getElementsByTagName('Setup')
                    if setupElements:
                        mediaSignatureElements = setupElements[
                            0].getElementsByTagName('mediaSignature')
                        if mediaSignatureElements:
                            element = mediaSignatureElements[0]
                            for node in element.childNodes:
                                mediaSignature += node.nodeValue

            # collect info from every payload; the one matching the driver
            # folder (or the mediaSignature) names the overall product
            for item in munkicommon.listdir(path):
                payloadpath = os.path.join(path, item)
                payloadinfo = getPayloadInfo(payloadpath)
                if payloadinfo:
                    payloads.append(payloadinfo)
                    if ((driverfolder and item == driverfolder) or
                            (mediaSignature and
                             payloadinfo['AdobeCode'] == mediaSignature)):
                        info['display_name'] = payloadinfo['display_name']
                        info['version'] = payloadinfo['version']
                        info['AdobeSetupType'] = 'ProductInstall'

    if not payloads:
        # look for an extensions folder; almost certainly this is an Updater
        for (path, dummy_dirs, dummy_files) in os.walk(installroot):
            if path.endswith("/extensions"):
                for item in munkicommon.listdir(path):
                    #skip LanguagePacks
                    if item.find("LanguagePack") == -1:
                        itempath = os.path.join(path, item)
                        payloadinfo = getPayloadInfo(itempath)
                        if payloadinfo:
                            payloads.append(payloadinfo)

                # we found an extensions dir,
                # so no need to keep walking the install root
                break

    if payloads:
        if len(payloads) == 1:
            # single payload: it names the product directly
            info['display_name'] = payloads[0]['display_name']
            info['version'] = payloads[0]['version']
        else:
            # multiple payloads and no driver match: a human must choose
            if not 'display_name' in info:
                info['display_name'] = "ADMIN: choose from payloads"
            if not 'version' in info:
                info['version'] = "ADMIN please set me"

        info['payloads'] = payloads
        # total size is the sum of the individual payload sizes (Kbytes)
        installed_size = 0
        for payload in payloads:
            installed_size = installed_size + payload.get('installed_size', 0)
        info['installed_size'] = installed_size

    return info
def getAdobePackageInfo(installroot):
    '''Builds info for an Adobe package rooted at installroot.

    Payload details come from getAdobeSetupInfo(); the display name and
    description come from AdobeUberInstaller.xml (CS4-style) when that
    file is present, or from optionXML.xml (CS5 AAMEE-style) otherwise.
    Falls back to the directory name when no display name was found.'''
    info = getAdobeSetupInfo(installroot)
    info['description'] = ""
    uber_installer_xml = os.path.join(installroot, "AdobeUberInstaller.xml")
    if os.path.exists(uber_installer_xml):
        # CS4-style package: "<name> : <description>" lives in
        # InstallInfo/PackageDescription
        description = ''
        dom = minidom.parse(uber_installer_xml)
        installinfo_elems = dom.getElementsByTagName("InstallInfo")
        if installinfo_elems:
            desc_elems = installinfo_elems[0].getElementsByTagName(
                "PackageDescription")
            if desc_elems:
                for node in desc_elems[0].childNodes:
                    description += node.nodeValue
        if description:
            name_and_desc = description.split(' : ', 1)
            info['display_name'] = name_and_desc[0]
            if len(name_and_desc) > 1:
                info['description'] = name_and_desc[1]
            else:
                info['description'] = ""
            return info
    else:
        # no UberInstaller XML; try the AAMEE-style optionXML.xml
        option_xml = os.path.join(installroot, "optionXML.xml")
        if os.path.exists(option_xml):
            dom = minidom.parse(option_xml)
            installinfo_elems = dom.getElementsByTagName("InstallInfo")
            if installinfo_elems:
                pkgname_elems = installinfo_elems[0].getElementsByTagName(
                    "PackageName")
                if pkgname_elems:
                    pkgname = ""
                    for node in pkgname_elems[0].childNodes:
                        pkgname += node.nodeValue
                    info['display_name'] = pkgname
    if not info.get('display_name'):
        info['display_name'] = os.path.basename(installroot)
    return info
def getXMLtextElement(dom_node, name):
    '''Returns the concatenated text content of the first child element
    of dom_node with the given tag name, or None if no such element
    exists (an element with no text children yields an empty string).'''
    matches = dom_node.getElementsByTagName(name)
    if not matches:
        return None
    return ''.join(node.nodeValue for node in matches[0].childNodes)
def parseOptionXML(option_xml_file):
    '''Parses an optionXML.xml file and pulls out items of interest,
    returning them in a dictionary.

    Collected keys: packager_id, packager_version (from the InstallInfo
    element's attributes), package_name, package_id, and a 'products'
    list with prodName, prodVersion and (when present) mediaSignature
    for each Media element.'''
    info = {}
    dom = minidom.parse(option_xml_file)
    installinfo = dom.getElementsByTagName('InstallInfo')
    if installinfo:
        attrs = installinfo[0].attributes
        if 'id' in attrs.keys():
            info['packager_id'] = attrs['id'].value
        if 'version' in attrs.keys():
            info['packager_version'] = attrs['version'].value
        info['package_name'] = getXMLtextElement(installinfo[0], 'PackageName')
        info['package_id'] = getXMLtextElement(installinfo[0], 'PackageID')
        info['products'] = []
        medias_elements = installinfo[0].getElementsByTagName('Medias')
        if medias_elements:
            for media in medias_elements[0].getElementsByTagName('Media'):
                product = {
                    'prodName': getXMLtextElement(media, 'prodName'),
                    'prodVersion': getXMLtextElement(media, 'prodVersion'),
                }
                setup_elements = media.getElementsByTagName('Setup')
                if setup_elements:
                    signature_elems = setup_elements[
                        0].getElementsByTagName('mediaSignature')
                    if signature_elems:
                        signature = ''
                        for node in signature_elems[0].childNodes:
                            signature += node.nodeValue
                        product['mediaSignature'] = signature
                info['products'].append(product)
    return info
def countPayloads(dirpath):
    '''Returns the number of payload directories found inside any
    "payloads" folder under dirpath.'''
    payload_count = 0
    for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
        if not path.endswith("/payloads"):
            continue
        for entry in munkicommon.listdir(path):
            if os.path.isdir(os.path.join(path, entry)):
                payload_count += 1
    return payload_count
def getPercent(current, maximum):
    '''Returns a percent-done value (0-100) suitable for MunkiStatus
    progress display, or -1 (indeterminate) when maximum is zero or
    current is out of the [0, maximum] range.'''
    if maximum == 0 or current < 0 or current > maximum:
        return -1
    if current == maximum:
        return 100
    return int(float(current) / float(maximum) * 100)
def findSetupApp(dirpath):
    '''Searches dirpath and enclosed directories for a Setup.app bundle.
    Returns the path to its executable, or an empty string if none
    is found.'''
    for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
        if not path.endswith("Setup.app"):
            continue
        executable = os.path.join(path, "Contents", "MacOS", "Setup")
        if os.path.exists(executable):
            return executable
    return ''
def findInstallApp(dirpath):
    '''Searches dirpath and enclosed directories for an Install.app
    bundle. Returns the path to its executable, or an empty string if
    none is found.'''
    for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
        if not path.endswith("Install.app"):
            continue
        executable = os.path.join(path, "Contents", "MacOS", "Install")
        if os.path.exists(executable):
            return executable
    return ''
def findAdobePatchInstallerApp(dirpath):
    '''Searches dirpath and enclosed directories for an
    AdobePatchInstaller.app bundle. Returns the path to its executable,
    or an empty string if none is found.'''
    for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
        if not path.endswith("AdobePatchInstaller.app"):
            continue
        executable = os.path.join(
            path, "Contents", "MacOS", "AdobePatchInstaller")
        if os.path.exists(executable):
            return executable
    return ''
def findAdobeDeploymentManager(dirpath):
    '''Searches dirpath and enclosed directories for an
    AdobeDeploymentManager executable inside a pkg/Contents/Resources
    directory. Returns its path, or an empty string if none is found.'''
    for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
        if not path.endswith("pkg/Contents/Resources"):
            continue
        dm_path = os.path.join(path, "AdobeDeploymentManager")
        if os.path.exists(dm_path):
            return dm_path
    return ''
# Maps process ID -> remaining countdown used by killStupidProcesses().
# Module-level so the countdown persists across repeated invocations.
secondsToLive = {}
def killStupidProcesses():
    '''A nasty bit of hackery to get Adobe CS5 AAMEE packages to install
    when at the loginwindow.

    Looks for known-problematic Adobe helper processes. The first time a
    PID is seen it gets a countdown of 30; each later call decrements
    it. When the countdown reaches zero the process is killed with
    SIGKILL. At most one process is killed per invocation.'''
    stupid_processes = ["Adobe AIR Installer",
                        "Adobe AIR Application Installer",
                        "InstallAdobeHelp",
                        "open -a /Library/Application Support/Adobe/"
                        "SwitchBoard/SwitchBoard.app",
                        "/bin/bash /Library/Application Support/Adobe/"
                        "SwitchBoard/SwitchBoard.app/Contents/MacOS/"
                        "switchboard.sh"]
    for procname in stupid_processes:
        pid = utils.getPIDforProcessName(procname)
        if pid:
            if not pid in secondsToLive:
                # first sighting: give the process 30 more polling cycles
                secondsToLive[pid] = 30
            else:
                secondsToLive[pid] = secondsToLive[pid] - 1
                if secondsToLive[pid] == 0:
                    # it's been running too long; kill it
                    munkicommon.log("Killing PID %s: %s" % (pid, procname))
                    try:
                        os.kill(int(pid), 9)
                    except OSError:
                        pass
                    # remove this PID from our list
                    del secondsToLive[pid]
                    # only kill one process per invocation
                    return
def runAdobeInstallTool(
        cmd, number_of_payloads=0, killAdobeAIR=False, payloads=None,
        kind="CS5", operation="install"):
    '''An abstraction of the tasks for running Adobe Setup,
    AdobeUberInstaller, AdobeUberUninstaller, AdobeDeploymentManager, etc.

    Launches cmd via subprocess, then polls an
    AdobeInstallProgressMonitor once a second to report payload-level
    progress while the tool runs.

    Args:
        cmd: argv list for the Adobe tool to run.
        number_of_payloads: expected payload count (0 means unknown, so
            an indeterminate progress bar is shown).
        killAdobeAIR: if True, work around hung Adobe AIR helper
            processes when running at the loginwindow.
        payloads: optional payload info dicts, used to map AdobeCodes to
            human-readable payload names for status display.
        kind: Adobe product generation (e.g. "CS3".."CS6"), passed to
            the progress monitor.
        operation: "install" or "uninstall", passed to the monitor.
    Returns:
        the tool's exit code (possibly overridden by an "Exit Code:"
        line in its output); 0 and 8 are treated as success.
    '''
    # initialize an AdobeInstallProgressMonitor object.
    progress_monitor = AdobeInstallProgressMonitor(
        kind=kind, operation=operation)
    if munkicommon.munkistatusoutput and not number_of_payloads:
        # indeterminate progress bar
        munkistatus.percent(-1)
    proc = subprocess.Popen(cmd, shell=False, bufsize=1,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    old_payload_completed_count = 0
    payloadname = ""
    # poll once a second until the tool exits, updating status whenever
    # the completed-payload count increases
    while proc.poll() == None:
        time.sleep(1)
        (payload_completed_count, adobe_code) = progress_monitor.info()
        if payload_completed_count > old_payload_completed_count:
            old_payload_completed_count = payload_completed_count
            if adobe_code and payloads:
                # map the AdobeCode to a friendly display name if we can
                matched_payloads = [payload for payload in payloads
                                    if payload.get('AdobeCode') == adobe_code]
                if matched_payloads:
                    payloadname = matched_payloads[0].get('display_name')
                else:
                    payloadname = adobe_code
                payloadinfo = " - " + payloadname
            else:
                payloadinfo = ""
            if number_of_payloads:
                munkicommon.display_status_minor(
                    'Completed payload %s of %s%s' %
                    (payload_completed_count, number_of_payloads,
                     payloadinfo))
            else:
                munkicommon.display_status_minor(
                    'Completed payload %s%s',
                    payload_completed_count, payloadinfo)
            if munkicommon.munkistatusoutput:
                munkistatus.percent(
                    getPercent(payload_completed_count, number_of_payloads))
        # Adobe AIR Installer workaround/hack
        # CSx installs at the loginwindow hang when Adobe AIR is installed.
        # So we check for this and kill the process. Ugly.
        # Hopefully we can disable this in the future.
        if killAdobeAIR:
            if (not munkicommon.getconsoleuser() or
                    munkicommon.getconsoleuser() == u"loginwindow"):
                # we're at the loginwindow.
                killStupidProcesses()
    # run of tool completed
    retcode = proc.poll()
    #check output for errors
    output = proc.stdout.readlines()
    for line in output:
        line = line.rstrip("\n")
        if line.startswith("Error"):
            munkicommon.display_error(line)
        if line.startswith("Exit Code:"):
            # the tool may report its real exit code in its output even
            # when the process itself exited 0
            if retcode == 0:
                try:
                    retcode = int(line[11:])
                except (ValueError, TypeError):
                    retcode = -1
    if retcode != 0 and retcode != 8:
        munkicommon.display_error(
            'Adobe Setup error: %s: %s', retcode, adobeSetupError(retcode))
    else:
        if munkicommon.munkistatusoutput:
            munkistatus.percent(100)
        munkicommon.display_status_minor('Done.')
    return retcode
def runAdobeSetup(dmgpath, uninstalling=False, payloads=None):
    '''Runs the Adobe setup tool in silent mode from
    an Adobe update DMG or an Adobe CS3 install DMG.

    Args:
        dmgpath: path to the disk image containing Setup.app.
        uninstalling: if True, perform an uninstall; requires an
            uninstall.xml at the root of the disk image.
        payloads: optional payload info dicts for progress display.
    Returns:
        the Adobe tool's exit code, or -1 on mount/lookup failure.
    '''
    munkicommon.display_status_minor(
        'Mounting disk image %s' % os.path.basename(dmgpath))
    mountpoints = mountAdobeDmg(dmgpath)
    if mountpoints:
        setup_path = findSetupApp(mountpoints[0])
        if setup_path:
            # look for install.xml or uninstall.xml at root
            deploymentfile = None
            installxml = os.path.join(mountpoints[0], "install.xml")
            uninstallxml = os.path.join(mountpoints[0], "uninstall.xml")
            if uninstalling:
                operation = 'uninstall'
                if os.path.exists(uninstallxml):
                    deploymentfile = uninstallxml
                else:
                    # we've been asked to uninstall,
                    # but found no uninstall.xml
                    # so we need to bail
                    munkicommon.unmountdmg(mountpoints[0])
                    munkicommon.display_error(
                        '%s doesn\'t appear to contain uninstall info.',
                        os.path.basename(dmgpath))
                    return -1
            else:
                operation = 'install'
                if os.path.exists(installxml):
                    deploymentfile = installxml
            # try to find and count the number of payloads
            # so we can give a rough progress indicator
            number_of_payloads = countPayloads(mountpoints[0])
            munkicommon.display_status_minor('Running Adobe Setup')
            adobe_setup = [setup_path, '--mode=silent', '--skipProcessCheck=1']
            if deploymentfile:
                adobe_setup.append('--deploymentFile=%s' % deploymentfile)
            retcode = runAdobeInstallTool(
                adobe_setup, number_of_payloads, payloads=payloads,
                kind='CS3', operation=operation)
        else:
            munkicommon.display_error(
                '%s doesn\'t appear to contain Adobe Setup.' %
                os.path.basename(dmgpath))
            retcode = -1
        munkicommon.unmountdmg(mountpoints[0])
        return retcode
    else:
        munkicommon.display_error('No mountable filesystems on %s' % dmgpath)
        return -1
def writefile(stringdata, path):
    '''Writes string data to path as UTF-8, followed by a newline.

    Returns the path on success, empty string on failure.'''
    try:
        # Binary mode + explicit encode writes the same bytes as the
        # old Python-2-only "print >> fileobject" version, and the
        # 'with' block guarantees the handle is closed even when the
        # write itself raises (the original leaked it in that case).
        with open(path, mode='wb') as fileobject:
            fileobject.write(stringdata.encode('UTF-8') + b'\n')
        return path
    except (OSError, IOError):
        munkicommon.display_error("Couldn't write %s" % stringdata)
        return ""
def doAdobeCS5Uninstall(adobeInstallInfo, payloads=None):
    '''Runs the locally-installed Adobe CS5 tools to remove CS5 products.

    Requires the uninstall XML stored in adobeInstallInfo and the CS5
    Setup.app installed under /Library/Application Support/Adobe.
    Returns the tool's exit code, or -1 if prerequisites are missing.'''
    uninstallxml = adobeInstallInfo.get('uninstallxml')
    if not uninstallxml:
        munkicommon.display_error("No uninstall.xml in adobe_install_info")
        return -1
    # write the deployment XML where the Adobe tool can read it
    xml_path = os.path.join(munkicommon.tmpdir(), "uninstall.xml")
    deploymentFile = writefile(uninstallxml, xml_path)
    if not deploymentFile:
        return -1
    setupapp = "/Library/Application Support/Adobe/OOBE/PDApp/DWA/Setup.app"
    setup = os.path.join(setupapp, "Contents/MacOS/Setup")
    if not os.path.exists(setup):
        munkicommon.display_error("%s is not installed." % setupapp)
        return -1
    uninstall_cmd = [
        setup,
        '--mode=silent',
        '--action=uninstall',
        '--skipProcessCheck=1',
        '--deploymentFile=%s' % deploymentFile,
    ]
    munkicommon.display_status_minor('Running Adobe Uninstall')
    payloadcount = adobeInstallInfo.get('payload_count', 0)
    return runAdobeInstallTool(uninstall_cmd, payloadcount, payloads=payloads,
                               kind='CS5', operation='uninstall')
def runAdobeCCPpkgScript(dmgpath, payloads=None, operation='install'):
    '''Installs or removes an Adobe product packaged via
    Creative Cloud Packager.

    Args:
        dmgpath: path to the disk image containing the CCP package.
        payloads: optional payload info dicts for progress display.
        operation: "install" or "uninstall".
    Returns:
        exit code from the Adobe preinstall script, or -1 on error.
    '''
    munkicommon.display_status_minor(
        'Mounting disk image %s' % os.path.basename(dmgpath))
    mountpoints = mountAdobeDmg(dmgpath)
    if not mountpoints:
        munkicommon.display_error("No mountable filesystems on %s" % dmgpath)
        return -1
    deploymentmanager = findAdobeDeploymentManager(mountpoints[0])
    if not deploymentmanager:
        munkicommon.display_error(
            '%s doesn\'t appear to contain AdobeDeploymentManager',
            os.path.basename(dmgpath))
        munkicommon.unmountdmg(mountpoints[0])
        return -1
    # big hack to convince the Adobe tools to install off a mounted
    # disk image.
    #
    # For some reason, some versions of the Adobe install tools refuse to
    # install when the payloads are on a "removable" disk,
    # which includes mounted disk images.
    #
    # we create a temporary directory on the local disk and then symlink
    # some resources from the mounted disk image to the temporary
    # directory. When we pass this temporary directory to the Adobe
    # installation tools, they are now happy.
    basepath = os.path.dirname(deploymentmanager)
    preinstall_script = os.path.join(basepath, "preinstall")
    if not os.path.exists(preinstall_script):
        if operation == 'install':
            munkicommon.display_error(
                "No Adobe install script found on %s" % dmgpath)
        else:
            munkicommon.display_error(
                "No Adobe uninstall script found on %s" % dmgpath)
        munkicommon.unmountdmg(mountpoints[0])
        return -1
    number_of_payloads = countPayloads(basepath)
    tmpdir = tempfile.mkdtemp(prefix='munki-', dir='/tmp')
    # make our symlinks
    # BUGFIX: the original list was ['ASU' 'ASU2', ...] -- a missing
    # comma concatenated the strings to 'ASUASU2', so neither the ASU
    # nor the ASU2 directory was ever symlinked.
    for dir_name in ['ASU', 'ASU2', 'ProvisioningTool', 'uninstallinfo']:
        if os.path.isdir(os.path.join(basepath, dir_name)):
            os.symlink(os.path.join(basepath, dir_name),
                       os.path.join(tmpdir, dir_name))
    for dir_name in ['Patches', 'Setup']:
        realdir = os.path.join(basepath, dir_name)
        if os.path.isdir(realdir):
            tmpsubdir = os.path.join(tmpdir, dir_name)
            os.mkdir(tmpsubdir)
            for item in munkicommon.listdir(realdir):
                os.symlink(os.path.join(realdir, item),
                           os.path.join(tmpsubdir, item))
    os_version_tuple = munkicommon.getOsVersion(as_tuple=True)
    if (os_version_tuple < (10, 11) and
            (not munkicommon.getconsoleuser() or
             munkicommon.getconsoleuser() == u"loginwindow")):
        # we're at the loginwindow, so we need to run the deployment
        # manager in the loginwindow context using launchctl bsexec
        # launchctl bsexec doesn't work for this in El Cap, so do it
        # only if we're running Yosemite or earlier
        loginwindowPID = utils.getPIDforProcessName("loginwindow")
        cmd = ['/bin/launchctl', 'bsexec', loginwindowPID]
    else:
        cmd = []
    # preinstall script is in pkg/Contents/Resources, so calculate
    # path to pkg
    pkg_dir = os.path.dirname(os.path.dirname(basepath))
    cmd.extend([preinstall_script, pkg_dir, '/', '/'])
    if operation == 'install':
        munkicommon.display_status_minor('Starting Adobe installer...')
    retcode = runAdobeInstallTool(
        cmd, number_of_payloads, killAdobeAIR=True, payloads=payloads,
        kind='CS6', operation=operation)
    # now clean up and return
    dummy_result = subprocess.call(["/bin/rm", "-rf", tmpdir])
    munkicommon.unmountdmg(mountpoints[0])
    return retcode
def runAdobeCS5AAMEEInstall(dmgpath, payloads=None):
    '''Installs a CS5 product using an AAMEE-generated package on a
    disk image.

    Args:
        dmgpath: path to the disk image containing the AAMEE package.
        payloads: optional payload info dicts for progress display.
    Returns:
        exit code from AdobeDeploymentManager, or -1 on error.
    '''
    munkicommon.display_status_minor(
        'Mounting disk image %s' % os.path.basename(dmgpath))
    mountpoints = mountAdobeDmg(dmgpath)
    if not mountpoints:
        munkicommon.display_error("No mountable filesystems on %s" % dmgpath)
        return -1
    deploymentmanager = findAdobeDeploymentManager(mountpoints[0])
    if deploymentmanager:
        # big hack to convince the Adobe tools to install off a mounted
        # disk image.
        #
        # For some reason, some versions of the Adobe install tools refuse to
        # install when the payloads are on a "removable" disk,
        # which includes mounted disk images.
        #
        # we create a temporary directory on the local disk and then symlink
        # some resources from the mounted disk image to the temporary
        # directory. When we pass this temporary directory to the Adobe
        # installation tools, they are now happy.
        basepath = os.path.dirname(deploymentmanager)
        number_of_payloads = countPayloads(basepath)
        tmpdir = tempfile.mkdtemp(prefix='munki-', dir='/tmp')
        # make our symlinks
        os.symlink(os.path.join(basepath, "ASU"), os.path.join(tmpdir, "ASU"))
        os.symlink(os.path.join(basepath, "ProvisioningTool"),
                   os.path.join(tmpdir, "ProvisioningTool"))
        for dir_name in ['Patches', 'Setup']:
            realdir = os.path.join(basepath, dir_name)
            if os.path.isdir(realdir):
                tmpsubdir = os.path.join(tmpdir, dir_name)
                os.mkdir(tmpsubdir)
                for item in munkicommon.listdir(realdir):
                    os.symlink(
                        os.path.join(realdir, item),
                        os.path.join(tmpsubdir, item))
        optionXMLfile = os.path.join(basepath, "optionXML.xml")
        os_version_tuple = munkicommon.getOsVersion(as_tuple=True)
        if (os_version_tuple < (10, 11) and
                (not munkicommon.getconsoleuser() or
                 munkicommon.getconsoleuser() == u"loginwindow")):
            # we're at the loginwindow, so we need to run the deployment
            # manager in the loginwindow context using launchctl bsexec
            # launchctl bsexec doesn't work for this in El Cap, so do it
            # only if we're running Yosemite or earlier
            loginwindowPID = utils.getPIDforProcessName("loginwindow")
            cmd = ['/bin/launchctl', 'bsexec', loginwindowPID]
        else:
            cmd = []
        cmd.extend([deploymentmanager, '--optXMLPath=%s' % optionXMLfile,
                    '--setupBasePath=%s' % basepath, '--installDirPath=/',
                    '--mode=install'])
        munkicommon.display_status_minor('Starting Adobe installer...')
        retcode = runAdobeInstallTool(
            cmd, number_of_payloads, killAdobeAIR=True, payloads=payloads,
            kind='CS5', operation='install')
        # now clean up our symlink hackfest
        dummy_result = subprocess.call(["/bin/rm", "-rf", tmpdir])
    else:
        munkicommon.display_error(
            '%s doesn\'t appear to contain AdobeDeploymentManager',
            os.path.basename(dmgpath))
        retcode = -1
    munkicommon.unmountdmg(mountpoints[0])
    return retcode
def runAdobeCS5PatchInstaller(dmgpath, copylocal=False, payloads=None):
    '''Runs the AdobePatchInstaller for CS5.
    Optionally can copy the DMG contents to the local disk
    to work around issues with the patcher.

    Args:
        dmgpath: path to the disk image containing the patch installer.
        copylocal: if True, copy the DMG contents to local disk first
            (and delete the DMG afterwards to free up space).
        payloads: optional payload info dicts for progress display.
    Returns:
        exit code from AdobePatchInstaller, or -1 on error.
    '''
    munkicommon.display_status_minor(
        'Mounting disk image %s' % os.path.basename(dmgpath))
    mountpoints = mountAdobeDmg(dmgpath)
    if mountpoints:
        if copylocal:
            # copy the update to the local disk before installing
            updatedir = tempfile.mkdtemp(prefix='munki-', dir='/tmp')
            retcode = subprocess.call(
                ["/bin/cp", "-r", mountpoints[0], updatedir])
            # unmount diskimage
            munkicommon.unmountdmg(mountpoints[0])
            if retcode:
                munkicommon.display_error(
                    'Error copying items from %s' % dmgpath)
                return -1
            # remove the dmg file to free up space, since we don't need it
            # any longer
            dummy_result = subprocess.call(["/bin/rm", dmgpath])
        else:
            updatedir = mountpoints[0]
        patchinstaller = findAdobePatchInstallerApp(updatedir)
        if patchinstaller:
            # try to find and count the number of payloads
            # so we can give a rough progress indicator
            number_of_payloads = countPayloads(updatedir)
            munkicommon.display_status_minor('Running Adobe Patch Installer')
            install_cmd = [patchinstaller,
                           '--mode=silent',
                           '--skipProcessCheck=1']
            retcode = runAdobeInstallTool(install_cmd,
                                          number_of_payloads, payloads=payloads,
                                          kind='CS5', operation='install')
        else:
            munkicommon.display_error(
                "%s doesn't appear to contain AdobePatchInstaller.app.",
                os.path.basename(dmgpath))
            retcode = -1
        if copylocal:
            # clean up our mess
            dummy_result = subprocess.call(["/bin/rm", "-rf", updatedir])
        else:
            munkicommon.unmountdmg(mountpoints[0])
        return retcode
    else:
        munkicommon.display_error('No mountable filesystems on %s' % dmgpath)
        return -1
def runAdobeUberTool(dmgpath, pkgname='', uninstalling=False, payloads=None):
    '''Runs either AdobeUberInstaller or AdobeUberUninstaller
    from a disk image and provides progress feedback.
    pkgname is the name of a directory at the top level of the dmg
    containing the AdobeUber tools and their XML files.

    Args:
        dmgpath: path to the disk image.
        pkgname: subdirectory of the dmg holding the AdobeUber tools.
        uninstalling: if True, run AdobeUberUninstaller instead of
            AdobeUberInstaller.
        payloads: optional payload info dicts for progress display.
    Returns:
        exit code from the AdobeUber tool, or -1 on error.
    '''
    munkicommon.display_status_minor(
        'Mounting disk image %s' % os.path.basename(dmgpath))
    mountpoints = mountAdobeDmg(dmgpath)
    if mountpoints:
        installroot = mountpoints[0]
        if uninstalling:
            ubertool = os.path.join(installroot, pkgname,
                                    "AdobeUberUninstaller")
        else:
            ubertool = os.path.join(installroot, pkgname,
                                    "AdobeUberInstaller")
        if os.path.exists(ubertool):
            info = getAdobePackageInfo(installroot)
            packagename = info['display_name']
            action = "Installing"
            operation = "install"
            if uninstalling:
                action = "Uninstalling"
                operation = "uninstall"
            munkicommon.display_status_major('%s %s' % (action, packagename))
            if munkicommon.munkistatusoutput:
                munkistatus.detail('Starting %s' % os.path.basename(ubertool))
            # try to find and count the number of payloads
            # so we can give a rough progress indicator
            number_of_payloads = countPayloads(installroot)
            retcode = runAdobeInstallTool(
                [ubertool], number_of_payloads, killAdobeAIR=True,
                payloads=payloads, kind='CS4', operation=operation)
        else:
            munkicommon.display_error("No %s found" % ubertool)
            retcode = -1
        munkicommon.unmountdmg(installroot)
        return retcode
    else:
        munkicommon.display_error("No mountable filesystems on %s" % dmgpath)
        return -1
def findAcrobatPatchApp(dirpath):
    '''Attempts to find an AcrobatPro patching application under dirpath.

    An app bundle qualifies when it carries Adobe's patching script at
    Contents/Resources/ApplyOperation.py. Returns the path to the .app
    bundle itself, or an empty string if none is found.'''
    for (path, dummy_dirs, dummy_files) in os.walk(dirpath):
        if not path.endswith(".app"):
            continue
        # look for Adobe's patching script
        patch_script = os.path.join(
            path, 'Contents', 'Resources', 'ApplyOperation.py')
        if os.path.exists(patch_script):
            return path
    return ''
def updateAcrobatPro(dmgpath):
    """Uses the scripts and Resources inside the Acrobat Patch application
    bundle to silently update Acrobat Pro and related apps
    Why oh why does this use a different mechanism than the other Adobe
    apps?

    Args:
        dmgpath: path to a disk image containing an Acrobat patch app.
    Returns:
        0 on success; the patch tool's non-zero exit code or -1 on error.
    """
    if munkicommon.munkistatusoutput:
        munkistatus.percent(-1)
    #first mount the dmg
    munkicommon.display_status_minor(
        'Mounting disk image %s' % os.path.basename(dmgpath))
    mountpoints = mountAdobeDmg(dmgpath)
    if mountpoints:
        installroot = mountpoints[0]
        pathToAcrobatPatchApp = findAcrobatPatchApp(installroot)
    else:
        munkicommon.display_error("No mountable filesystems on %s" % dmgpath)
        return -1
    if not pathToAcrobatPatchApp:
        munkicommon.display_error(
            'No Acrobat Patch app at %s', pathToAcrobatPatchApp)
        munkicommon.unmountdmg(installroot)
        return -1
    # some values needed by the patching script
    resourcesDir = os.path.join(
        pathToAcrobatPatchApp, 'Contents', 'Resources')
    ApplyOperation = os.path.join(resourcesDir, 'ApplyOperation.py')
    callingScriptPath = os.path.join(resourcesDir, 'InstallUpdates.sh')
    appList = []
    appListFile = os.path.join(resourcesDir, 'app_list.txt')
    if os.path.exists(appListFile):
        fileobj = open(appListFile, mode='r', buffering=-1)
        if fileobj:
            for line in fileobj.readlines():
                appList.append(line)
            fileobj.close()
    if not appList:
        munkicommon.display_error('Did not find a list of apps to update.')
        munkicommon.unmountdmg(installroot)
        return -1
    # BUGFIX: initialize retcode so the final "return retcode" cannot
    # raise a NameError when every app in the list is optional and not
    # installed (each iteration would 'continue' without assigning it).
    retcode = 0
    payloadNum = -1
    for line in appList:
        payloadNum = payloadNum + 1
        if munkicommon.munkistatusoutput:
            munkistatus.percent(getPercent(payloadNum + 1, len(appList) + 1))
        (appname, status) = line.split("\t")
        munkicommon.display_status_minor('Searching for %s' % appname)
        # first look in the obvious place
        pathname = os.path.join("/Applications/Adobe Acrobat 9 Pro", appname)
        if os.path.exists(pathname):
            item = {}
            item['path'] = pathname
            candidates = [item]
        else:
            # use system_profiler to search for the app
            candidates = [item for item in munkicommon.getAppData()
                          if item['path'].endswith('/' + appname)]
        # hope there's only one!
        if len(candidates) == 0:
            if status == "optional":
                continue
            else:
                munkicommon.display_error("Cannot patch %s because it "
                                          "was not found on the startup "
                                          "disk." % appname)
                munkicommon.unmountdmg(installroot)
                return -1
        if len(candidates) > 1:
            munkicommon.display_error("Cannot patch %s because we found "
                                      "more than one copy on the "
                                      "startup disk." % appname)
            munkicommon.unmountdmg(installroot)
            return -1
        munkicommon.display_status_minor('Updating %s' % appname)
        apppath = os.path.dirname(candidates[0]["path"])
        cmd = [ApplyOperation, apppath, appname, resourcesDir,
               callingScriptPath, str(payloadNum)]
        proc = subprocess.Popen(cmd, shell=False, bufsize=-1,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        while proc.poll() == None:
            time.sleep(1)
        # run of patch tool completed
        retcode = proc.poll()
        if retcode != 0:
            munkicommon.display_error(
                'Error patching %s: %s', appname, retcode)
            break
        else:
            munkicommon.display_status_minor('Patching %s complete.', appname)
    munkicommon.display_status_minor('Done.')
    if munkicommon.munkistatusoutput:
        munkistatus.percent(100)
    munkicommon.unmountdmg(installroot)
    return retcode
def getBundleInfo(path):
    """Returns the parsed Info.plist for the bundle at path, or None
    when the plist is missing or cannot be deserialized.

    Checks Contents/Info.plist first, then Resources/Info.plist.
    """
    for subdir in ("Contents", "Resources"):
        infopath = os.path.join(path, subdir, "Info.plist")
        if os.path.exists(infopath):
            try:
                return FoundationPlist.readPlist(infopath)
            except FoundationPlist.NSPropertyListSerializationException:
                return None
    return None
def getAdobeInstallInfo(installdir):
    '''Encapsulates info used by the Adobe Setup/Install app:
    media signature, payload count, and (when optionXML.xml exists)
    the uninstall XML.'''
    adobeInstallInfo = {}
    if not installdir:
        return adobeInstallInfo
    adobeInstallInfo['media_signature'] = getCS5mediaSignature(installdir)
    adobeInstallInfo['payload_count'] = countPayloads(installdir)
    option_xml = os.path.join(installdir, "optionXML.xml")
    if os.path.exists(option_xml):
        adobeInstallInfo['uninstallxml'] = getCS5uninstallXML(option_xml)
    return adobeInstallInfo
def getAdobeCatalogInfo(mountpoint, pkgname=""):
    '''Used by makepkginfo to build pkginfo data for Adobe
    installers/updaters.

    Probes the mounted disk image for each known Adobe packaging style
    in turn (CCP/AAMEE, bare CS5 Install.app, CS5 patch installer, CS4
    UberInstaller, CS3/update Setup.app, Acrobat 9 patcher) and returns
    a pkginfo-style dictionary for the first style recognized, or None
    if nothing matched.'''
    # look for AdobeDeploymentManager (AAMEE installer)
    deploymentmanager = findAdobeDeploymentManager(mountpoint)
    if deploymentmanager:
        dirpath = os.path.dirname(deploymentmanager)
        option_xml_file = os.path.join(dirpath, 'optionXML.xml')
        option_xml_info = {}
        if os.path.exists(option_xml_file):
            option_xml_info = parseOptionXML(option_xml_file)
        cataloginfo = getAdobePackageInfo(dirpath)
        if cataloginfo:
            # add some more data
            if option_xml_info.get('packager_id') == u'CloudPackager':
                # CCP package
                cataloginfo['display_name'] = option_xml_info.get(
                    'package_name', 'unknown')
                cataloginfo['name'] = cataloginfo['display_name'].replace(
                    ' ', '')
                cataloginfo['uninstallable'] = True
                cataloginfo['uninstall_method'] = "AdobeCCPUninstaller"
                cataloginfo['installer_type'] = "AdobeCCPInstaller"
                cataloginfo['minimum_os_version'] = "10.6.8"
                mediasignatures = [
                    item['mediaSignature']
                    for item in option_xml_info.get('products', [])
                    if 'mediaSignature' in item]
            else:
                # AAMEE package
                cataloginfo['name'] = cataloginfo['display_name'].replace(
                    ' ', '')
                cataloginfo['uninstallable'] = True
                cataloginfo['uninstall_method'] = "AdobeCS5AAMEEPackage"
                cataloginfo['installer_type'] = "AdobeCS5AAMEEPackage"
                cataloginfo['minimum_os_version'] = "10.5.0"
                cataloginfo['adobe_install_info'] = getAdobeInstallInfo(
                    installdir=dirpath)
                mediasignature = cataloginfo['adobe_install_info'].get(
                    "media_signature")
                mediasignatures = [mediasignature]
            if mediasignatures:
                # make a default <key>installs</key> array
                uninstalldir = "/Library/Application Support/Adobe/Uninstall"
                installs = []
                for mediasignature in mediasignatures:
                    signaturefile = mediasignature + ".db"
                    filepath = os.path.join(uninstalldir, signaturefile)
                    installitem = {}
                    installitem['path'] = filepath
                    installitem['type'] = 'file'
                    installs.append(installitem)
                cataloginfo['installs'] = installs
            return cataloginfo
    # Look for Install.app (Bare metal CS5 install)
    # we don't handle this type, but we'll report it
    # back so makepkginfo can provide an error message
    installapp = findInstallApp(mountpoint)
    if installapp:
        cataloginfo = {}
        cataloginfo['installer_type'] = "AdobeCS5Installer"
        return cataloginfo
    # Look for AdobePatchInstaller.app (CS5 updater)
    installapp = findAdobePatchInstallerApp(mountpoint)
    # (normalized from os.path.exists(installapp) for consistency with the
    # truthiness test above; findAdobePatchInstallerApp returns '' or an
    # existing path, so behavior is unchanged)
    if installapp:
        # this is a CS5 updater disk image
        cataloginfo = getAdobePackageInfo(mountpoint)
        if cataloginfo:
            # add some more data
            cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
            cataloginfo['uninstallable'] = False
            cataloginfo['installer_type'] = "AdobeCS5PatchInstaller"
            if pkgname:
                cataloginfo['package_path'] = pkgname
            # make some (hopfully functional) installs items from the payloads
            installs = []
            uninstalldir = "/Library/Application Support/Adobe/Uninstall"
            # first look for a payload with a display_name matching the
            # overall display_name
            for payload in cataloginfo.get('payloads', []):
                if (payload.get('display_name', '') ==
                        cataloginfo['display_name']):
                    if 'AdobeCode' in payload:
                        dbfile = payload['AdobeCode'] + ".db"
                        filepath = os.path.join(uninstalldir, dbfile)
                        installitem = {}
                        installitem['path'] = filepath
                        installitem['type'] = 'file'
                        installs.append(installitem)
                        break
            if installs == []:
                # didn't find a payload with matching name
                # just add all of the non-LangPack payloads
                # to the installs list.
                for payload in cataloginfo.get('payloads', []):
                    if 'AdobeCode' in payload:
                        # BUGFIX: use a '' default so a payload without a
                        # display_name can't make the 'in' test raise
                        # TypeError ("LangPack" in None)
                        if ("LangPack" in payload.get("display_name", "") or
                                "Language Files" in payload.get(
                                    "display_name", "")):
                            # skip Language Packs
                            continue
                        dbfile = payload['AdobeCode'] + ".db"
                        filepath = os.path.join(uninstalldir, dbfile)
                        installitem = {}
                        installitem['path'] = filepath
                        installitem['type'] = 'file'
                        installs.append(installitem)
            cataloginfo['installs'] = installs
            return cataloginfo
    # Look for AdobeUberInstaller items (CS4 install)
    pkgroot = os.path.join(mountpoint, pkgname)
    adobeinstallxml = os.path.join(pkgroot, "AdobeUberInstaller.xml")
    if os.path.exists(adobeinstallxml):
        # this is a CS4 Enterprise Deployment package
        cataloginfo = getAdobePackageInfo(pkgroot)
        if cataloginfo:
            # add some more data
            cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
            cataloginfo['uninstallable'] = True
            cataloginfo['uninstall_method'] = "AdobeUberUninstaller"
            cataloginfo['installer_type'] = "AdobeUberInstaller"
            if pkgname:
                cataloginfo['package_path'] = pkgname
            return cataloginfo
    # maybe this is an Adobe update DMG or CS3 installer
    # look for Adobe Setup.app
    setuppath = findSetupApp(mountpoint)
    if setuppath:
        cataloginfo = getAdobeSetupInfo(mountpoint)
        if cataloginfo:
            # add some more data
            cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
            cataloginfo['installer_type'] = "AdobeSetup"
            if cataloginfo.get('AdobeSetupType') == "ProductInstall":
                cataloginfo['uninstallable'] = True
                cataloginfo['uninstall_method'] = "AdobeSetup"
            else:
                cataloginfo['description'] = "Adobe updater"
                cataloginfo['uninstallable'] = False
                cataloginfo['update_for'] = ["PleaseEditMe-1.0.0.0.0"]
            return cataloginfo
    # maybe this is an Adobe Acrobat 9 Pro patcher?
    acrobatpatcherapp = findAcrobatPatchApp(mountpoint)
    if acrobatpatcherapp:
        cataloginfo = {}
        cataloginfo['installer_type'] = "AdobeAcrobatUpdater"
        cataloginfo['uninstallable'] = False
        plist = getBundleInfo(acrobatpatcherapp)
        cataloginfo['version'] = munkicommon.getVersionString(plist)
        cataloginfo['name'] = "AcrobatPro9Update"
        cataloginfo['display_name'] = "Adobe Acrobat Pro Update"
        cataloginfo['update_for'] = ["AcrobatPro9"]
        cataloginfo['RestartAction'] = 'RequireLogout'
        cataloginfo['requires'] = []
        cataloginfo['installs'] = [
            {'CFBundleIdentifier': 'com.adobe.Acrobat.Pro',
             'CFBundleName': 'Acrobat',
             'CFBundleShortVersionString': cataloginfo['version'],
             'path': '/Applications/Adobe Acrobat 9 Pro/Adobe Acrobat Pro.app',
             'type': 'application'}
        ]
        return cataloginfo
    # didn't find any Adobe installers/updaters we understand
    return None
def adobeSetupError(errorcode):
    '''Returns text description for numeric error code
    Reference:
    http://www.adobe.com/devnet/creativesuite/pdfs/DeployGuide.pdf

    errorcode -- integer exit code from an Adobe Setup/deployment tool
    Returns a human-readable message; "Unknown error" for unmapped codes.
    '''
    errormessage = {
        0: "Application installed successfully",
        1: "Unable to parse command line",
        2: "Unknown user interface mode specified",
        3: "Unable to initialize ExtendScript",
        4: "User interface workflow failed",
        5: "Unable to initialize user interface workflow",
        6: "Silent workflow completed with errors",
        7: "Unable to complete the silent workflow",
        8: "Exit and restart",
        9: "Unsupported operating system version",
        10: "Unsupported file system",
        11: "Another instance of Adobe Setup is running",
        12: "CAPS integrity error",
        13: "Media optimization failed",
        14: "Failed due to insufficient privileges",
        15: "Media DB Sync Failed",
        # fixed typo: was "Failed to laod the Deployment file"
        16: "Failed to load the Deployment file",
        17: "EULA Acceptance Failed",
        18: "C3PO Bootstrap Failed",
        19: "Conflicting processes running",
        20: "Install source path not specified or does not exist",
        21: "Version of payloads is not supported by this version of RIB",
        22: "Install Directory check failed",
        23: "System Requirements Check failed",
        24: "Exit User Canceled Workflow",
        25: "A binary path Name exceeded Operating System's MAX PATH limit",
        26: "Media Swap Required in Silent Mode",
        27: "Keyed files detected in target",
        28: "Base product is not installed",
        29: "Base product has been moved",
        30: "Insufficient disk space to install the payload + Done with errors",
        31: "Insufficient disk space to install the payload + Failed",
        32: "The patch is already applied",
        9999: "Catastrophic error",
        # -1 is our own sentinel, not an Adobe code
        -1: "AdobeUberInstaller failed before launching Setup"}
    return errormessage.get(errorcode, "Unknown error")
def doAdobeRemoval(item):
    '''Wrapper for all the Adobe removal methods.

    item -- pkginfo-style dict; must contain 'uninstall_method' and 'name',
            and may carry 'uninstaller_item', 'payloads',
            'adobe_package_name'/'package_path' or 'adobe_install_info'
            depending on the method.
    Returns the tool's exit code: 0 on success, non-zero (or -1) on failure.
    '''
    uninstallmethod = item['uninstall_method']
    payloads = item.get("payloads")
    itempath = ""
    if "uninstaller_item" in item:
        managedinstallbase = munkicommon.pref('ManagedInstallDir')
        itempath = os.path.join(managedinstallbase, 'Cache',
                                item["uninstaller_item"])
        if not os.path.exists(itempath):
            munkicommon.display_error("%s package for %s was "
                                      "missing from the cache."
                                      % (uninstallmethod, item['name']))
            return -1

    # Default to failure so an unrecognized uninstall method reports an
    # error instead of raising NameError at the final check below.
    retcode = -1
    if uninstallmethod == "AdobeSetup":
        # CS3 uninstall
        retcode = runAdobeSetup(itempath, uninstalling=True, payloads=payloads)
    elif uninstallmethod == "AdobeUberUninstaller":
        # CS4 uninstall
        pkgname = item.get("adobe_package_name") or item.get("package_path", "")
        retcode = runAdobeUberTool(
            itempath, pkgname, uninstalling=True, payloads=payloads)
    elif uninstallmethod == "AdobeCS5AAMEEPackage":
        # CS5 uninstall. Sheesh. Three releases, three methods.
        adobeInstallInfo = item.get('adobe_install_info')
        retcode = doAdobeCS5Uninstall(adobeInstallInfo, payloads=payloads)
    elif uninstallmethod == "AdobeCCPUninstaller":
        # Adobe Creative Cloud Packager packages
        retcode = runAdobeCCPpkgScript(
            itempath, payloads=payloads, operation="uninstall")

    if retcode:
        munkicommon.display_error("Uninstall of %s failed.", item['name'])
    return retcode
def doAdobeInstall(item):
    '''Wrapper to handle all the Adobe installer methods.

    First get the path to the installer dmg. We know
    it exists because installer.py already checked.

    item -- pkginfo-style dict; 'installer_item' names the cached installer,
            'installer_type' selects the Adobe-specific install method.
    Returns the tool's exit code: 0 on success, non-zero (or -1) on failure.
    '''
    managedinstallbase = munkicommon.pref('ManagedInstallDir')
    itempath = os.path.join(
        managedinstallbase, 'Cache', item['installer_item'])
    installer_type = item.get("installer_type", "")
    payloads = item.get("payloads")

    # Default to failure so an unknown installer_type returns an error code
    # instead of raising NameError at the return statement below.
    retcode = -1
    if installer_type == "AdobeSetup":
        # Adobe CS3/CS4 updater or Adobe CS3 installer
        retcode = runAdobeSetup(itempath, payloads=payloads)
    elif installer_type == "AdobeUberInstaller":
        # Adobe CS4 installer
        pkgname = item.get("adobe_package_name") or item.get("package_path", "")
        retcode = runAdobeUberTool(itempath, pkgname, payloads=payloads)
    elif installer_type == "AdobeAcrobatUpdater":
        # Acrobat Pro 9 updater
        retcode = updateAcrobatPro(itempath)
    elif installer_type == "AdobeCS5AAMEEPackage":
        # Adobe CS5 AAMEE package
        retcode = runAdobeCS5AAMEEInstall(itempath, payloads=payloads)
    elif installer_type == "AdobeCS5PatchInstaller":
        # Adobe CS5 updater
        retcode = runAdobeCS5PatchInstaller(
            itempath, copylocal=item.get("copy_local"), payloads=payloads)
    elif installer_type == "AdobeCCPInstaller":
        # Adobe Creative Cloud Packager packages
        retcode = runAdobeCCPpkgScript(itempath, payloads=payloads)
    return retcode
def main():
    """Placeholder entry point; this module is intended to be imported."""
    return None


if __name__ == '__main__':
    main()
| 40.752049 | 80 | 0.593755 | 3,553 | 0.059553 | 0 | 0 | 0 | 0 | 0 | 0 | 18,404 | 0.308476 |
8921c8342da583a53fe7e6baf0b2a3160c459d31 | 52 | py | Python | examples/fore.py | LyQuid12/colorgb | 78addcf85f0e750ca45a7955e5008f7a8a946281 | [
"MIT"
] | 1 | 2022-01-26T10:26:24.000Z | 2022-01-26T10:26:24.000Z | examples/fore.py | LyQuid12/colorgb | 78addcf85f0e750ca45a7955e5008f7a8a946281 | [
"MIT"
] | null | null | null | examples/fore.py | LyQuid12/colorgb | 78addcf85f0e750ca45a7955e5008f7a8a946281 | [
"MIT"
] | null | null | null | import colorgb
print(colorgb.fore("Hi!", "green"))
| 13 | 35 | 0.692308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.230769 |
892211146079eeb490107a06692355e20fadb9be | 1,513 | py | Python | prac/20200310/pygame_intro.py | yaroslavKonst/PythonPracticum | 2215b169252b6d429f1f38e5f2295d1435256785 | [
"Apache-2.0"
] | 2 | 2020-04-10T22:09:19.000Z | 2020-04-10T22:09:24.000Z | prac/20200310/pygame_intro.py | yaroslavKonst/PythonPracticum | 2215b169252b6d429f1f38e5f2295d1435256785 | [
"Apache-2.0"
] | null | null | null | prac/20200310/pygame_intro.py | yaroslavKonst/PythonPracticum | 2215b169252b6d429f1f38e5f2295d1435256785 | [
"Apache-2.0"
] | null | null | null | import sys, pygame
pygame.init()

# Logical "world" coordinates; the window shows the world scaled by DX/DY.
world_size = width, height = 10000, 9000
speed = [2, 2]  # ball velocity in world units per animation tick
DX, DY = 0.1, 0.1  # world -> screen scale factors
position = [width / 2, height / 2]  # ball centre, world coordinates
screen_size = int(DX * width), int(DY * height)
black = 0, 0, 0
screen = pygame.display.set_mode(screen_size)
ball = pygame.image.load("intro_ball.gif")
ballrect = ball.get_rect()
# Ball extents converted to world units (the sprite rect is screen pixels).
w, h = ballrect.width / DX, ballrect.height / DY
# Fire a USEREVENT every 10 ms to drive the animation loop below.
pygame.time.set_timer(pygame.USEREVENT, 10)
drag = False  # True while the ball is being dragged with the mouse
while 1:
    event = pygame.event.wait()
    if event.type == pygame.QUIT: sys.exit()
    if event.type == pygame.MOUSEBUTTONDOWN:
        # Start dragging only if the click lands inside the ball.
        if position[0] - w/2 < event.pos[0]/DX < position[0] + w/2 and position[1] - h/2 < event.pos[1]/DY < position[1] + h/2:
            drag = True
            # Remember the grab offset so the ball doesn't jump to the cursor.
            drg_x, drg_y = event.pos[0] / DX - position[0], event.pos[1] / DY - position[1]
    if event.type == pygame.MOUSEBUTTONUP:
        drag = False
    if event.type == pygame.MOUSEMOTION:
        if drag:
            position[0] = event.pos[0] / DX - drg_x
            position[1] = event.pos[1] / DY - drg_y
    if event.type == pygame.USEREVENT:
        if not drag:
            # Advance the ball and bounce off the world edges.
            position[0] += speed[0]
            position[1] += speed[1]
            if position[0] - w/2 < 0 or position[0] + w/2 > width:
                speed[0] = -speed[0]
            if position[1] - h/2 < 0 or position[1] + h/2 > height:
                speed[1] = -speed[1]
        # NOTE(review): indentation reconstructed -- redraw is assumed to
        # happen on every timer tick (including while dragging); confirm
        # against the original file.
        screen.fill(black)
        screen.blit(ball, (int(DX * (position[0] - w/2)), int(DY * (position[1] - h/2))))
        pygame.display.flip()
| 29.666667 | 127 | 0.574356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.010575 |
8922c6587dd08bd4b2be1a2477201fbaf0a8a712 | 18,808 | py | Python | gh.py | old-beans/isaac-gh-discord-bot | 4c0d704475e4e9b546e8359390f79ea4959fc2a9 | [
"MIT"
] | 2 | 2020-09-22T02:01:52.000Z | 2021-03-28T05:56:59.000Z | gh.py | old-beans/gh-isaacbot | 4c0d704475e4e9b546e8359390f79ea4959fc2a9 | [
"MIT"
] | null | null | null | gh.py | old-beans/gh-isaacbot | 4c0d704475e4e9b546e8359390f79ea4959fc2a9 | [
"MIT"
] | null | null | null | #! python
from airtable import Airtable
import discord, fstrings, re, random, os
from datetime import datetime
# datetime object containing current date and time
# Airtable credentials and campaign selection come from the environment.
AIRTABLE_API_KEY = os.getenv('AIRTABLE_API_KEY') # stored in .env
AIRTABLE_BASE_KEY = os.getenv('AIRTABLE_BASE_KEY') # stored in .env
CAMPAIGN_NAME = os.getenv('CAMPAIGN_NAME')
# One Airtable handle per table in the base.
campaign_airtable = Airtable(AIRTABLE_BASE_KEY, 'Campaign')
party_airtable = Airtable(AIRTABLE_BASE_KEY, 'Parties')
characters_airtable = Airtable(AIRTABLE_BASE_KEY, 'Characters')
scenario_airtable = Airtable(AIRTABLE_BASE_KEY, 'Scenarios')
items_airtable = Airtable(AIRTABLE_BASE_KEY, 'Items') # items record lookup
abilities_airtable = Airtable(AIRTABLE_BASE_KEY, 'Character Abilities') # abilities record lookup
classes_airtable = Airtable(AIRTABLE_BASE_KEY, 'Character Classes') # class record lookup
storylines_airtable = Airtable(AIRTABLE_BASE_KEY, 'Storylines')
players_airtable = Airtable(AIRTABLE_BASE_KEY, 'Players')
achievements_airtable = Airtable(AIRTABLE_BASE_KEY, 'Achievements')
class Player:
    """A Discord user who owns characters in the campaign.

    Wraps the player's row in the Players table, looked up by Discord
    username.
    """
    # xp thresholds for character levels 1-9; used to seed new characters
    character_levels = (0,45,95,150,210,275,345,420,500)
    # cumulative prosperity-tick thresholds (mirrors World.prosperity_levels)
    prosperity_levels = (0,4,9,15,22,30,39,50,64)

    def __init__(self, author):
        # author: Discord username string, used as the Airtable lookup key
        self.name = author
        self.player_rec = players_airtable.match('discordUsername', author)

    def activate_character(self, ch_name):
        """Attach the named character to this player and mark it active."""
        character_rec = characters_airtable.match('name', ch_name)
        characters_airtable.update(character_rec['id'], {'discordUsername': [self.player_rec['id']], 'isActive': True})

    def create_character(self, ch_name, ch_class):
        """Create a new character of ch_class owned by this player.

        Starting xp and gold scale with current campaign prosperity.
        NOTE(review): the campaign ('Camp Pain') and party ('Wyld Stallyns')
        are hard-coded here even though a CAMPAIGN_NAME env var exists at
        module level -- confirm whether these should come from configuration.
        NOTE(review): xp = character_levels[prosperity] indexes the threshold
        *above* the prosperity level (e.g. prosperity 1 -> 45 xp, i.e. a
        level-2 character); verify this is the intended starting xp.
        """
        self.world = World(campaign_airtable.match('name', 'Camp Pain')['id'])
        self.party = Party(party_airtable.match('name', 'Wyld Stallyns')['id'])
        prosperity = self.world.prosperity
        xp = self.character_levels[prosperity]
        gold = (prosperity + 1) * 15
        charclass = classes_airtable.match('name', ch_class)['id']
        characters_airtable.insert(
            {
                'name': ch_name, 'xp': xp, 'gold': gold, 'checks': 0, 'class': [charclass], 'isActive': True,
                'owner': [self.player_rec['id']], 'discordUsername': [self.player_rec['id']],
                'campaign': [self.world.campaign_rec['id']], 'party': [self.party.party_rec['id']]
            })
        print(f"[Isaacbot Logger]--{datetime.now()}-- New Character {ch_name} {ch_class} ")
class World:
    """Campaign-level ("world") state: prosperity and sanctuary donations.

    Wraps a single record of the Campaign table.
    Attributes set in __init__: campaign_rec, name, donations, pticks,
    prosperity, achievements.
    """
    # cumulative prosperity-tick thresholds; reaching index i means the
    # campaign is at prosperity level i+1 (level 9 at/after the last entry)
    prosperity_levels = (0, 4, 9, 15, 22, 30, 39, 50, 64)
    # total-donation thresholds at which something new is reached
    donation_levels = (100, 150, 200, 250, 300, 350, 400, 500, 600, 700, 800, 900, 1000)

    def __init__(self, campaign_rec_id):
        # Use World(character.campaign[0])
        self.campaign_rec = campaign_airtable.get(campaign_rec_id)
        # campaign name is an env variable for the bot eg CAMPAIGN_NAME=Camp Pain
        self.name = self.campaign_rec['fields']['name']
        self.donations = self.campaign_rec['fields']['totalDonations']
        self.pticks = self.campaign_rec['fields']['prosperityTicks']
        self.prosperity = self.prosperity_calc(self.pticks)
        self.achievements = self.campaign_rec['fields']['achievements']

    def prosperity_calc(self, pticks):
        """Translate cumulative prosperity ticks into a level from 1 to 9.

        Fixed: the original loop indexed prosperity_levels[i + 1] without a
        bounds check, so pticks at/above the final threshold (64) raised
        IndexError instead of reporting the maximum level.
        """
        if pticks >= self.prosperity_levels[-1]:
            return len(self.prosperity_levels)  # maxed out at level 9
        for i in range(len(self.prosperity_levels) - 1):
            if self.prosperity_levels[i] <= pticks < self.prosperity_levels[i + 1]:
                return i + 1

    def gain_prosperity(self):
        """Add a tick; if a threshold is crossed, bump the level and unlock items."""
        self.gain_ptick()
        # a tick count sitting exactly on a threshold means a new level
        if self.pticks in self.prosperity_levels:
            self.prosperity += 1
            print(f"[Isaacbot Logger]--{datetime.now()}-- +1 Overall Prosperity....{self.name}, {self.prosperity}")
            self.unlock_prosperity(self.prosperity)

    def gain_ptick(self):
        """Increment the tick counter and persist it."""
        self.pticks += 1
        campaign_airtable.update(self.campaign_rec['id'], {'prosperityTicks': self.pticks})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Gain prosperity....{self.name}, {self.pticks} ticks")

    def lose_ptick(self):
        """Decrement the tick counter and persist it."""
        self.pticks -= 1
        campaign_airtable.update(self.campaign_rec['id'], {'prosperityTicks': self.pticks})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Lose prosperity....{self.name}, {self.pticks} ticks")

    def unlock_prosperity(self, level_to_unlock):
        """Make all shop items gated on level_to_unlock purchasable."""
        items_to_unlock = items_airtable.search('prosperityRequirement', level_to_unlock)
        for item in items_to_unlock:
            items_airtable.update(item['id'], {'maxCount': item['fields']['realMax'], 'isUnlocked': True})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Lvl{level_to_unlock} items unlocked")

    def donate(self):
        """Record a 10-gold sanctuary donation and persist the total."""
        self.donations += 10
        campaign_airtable.update(self.campaign_rec['id'], {'totalDonations': self.donations})
        print(f"[Isaacbot Logger]--{datetime.now()}-- +10gp donated.....Total: {self.donations}")

    def calc_donations_needed(self):
        """Return the next donation threshold to reach.

        Fixed: below the first threshold (donations < 100) or at/above the
        last one (>= 1000) the original raised (unbound attribute /
        IndexError); it now clamps to the first/last threshold instead.
        """
        if self.donations < self.donation_levels[0]:
            self.next_donation_level = self.donation_levels[0]
        elif self.donations >= self.donation_levels[-1]:
            # already at/above the top tier; nothing further to reach
            self.next_donation_level = self.donation_levels[-1]
        else:
            for d in range(len(self.donation_levels) - 1):
                if self.donation_levels[d] <= self.donations < self.donation_levels[d + 1]:
                    self.next_donation_level = self.donation_levels[d + 1]
                    break
        return self.next_donation_level
class Scenario:
    """One row of the Scenarios table, looked up by scenario number.

    Can be used to unlock or complete a scenario; missing Airtable fields
    default to locked/incomplete with empty name/description/outcome.
    In future: get all available scenarios, add description, scenario info.
    """
    # attributes: scenario, number, name, unlocked, description, complete, outcome

    def __init__(self, scene_no):
        self.scenario = scenario_airtable.match('number', int(scene_no))
        self.number = int(scene_no)
        self.name = ""
        self.unlocked = None
        self.description = ""
        self.complete = None
        self.outcome = ""
        # The broad excepts below cover both "no matching record" and
        # "field not present on the record".
        try:
            self.unlocked = self.scenario['fields']['isUnlocked']
            self.name = self.scenario['fields']['name']
            try:
                self.description = self.scenario['fields']['description']
            except:
                pass  # description is optional
        except:
            self.unlocked = False
        try:
            # NOTE(review): if 'isComplete' is present but False, no exception
            # is raised and self.complete stays None (not False) -- confirm
            # callers treat None and False the same.
            if self.scenario['fields']['isComplete'] == True:
                self.complete = True
                try:
                    self.outcome = self.scenario['fields']['outcome']
                except:
                    pass  # outcome is optional
        except:
            self.complete = False
            self.outcome = ""

    def mark_unlocked(self, scene_name, scene_description=''):
        """Unlock the scenario, recording its name and optional description."""
        self.unlocked = self.scenario['fields']['isUnlocked'] = True
        self.name = scene_name
        self.description = scene_description
        scenario_airtable.update(self.scenario['id'], {'isUnlocked': True, 'name': self.name, 'description': self.description})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Scenario {self.number} unlocked")

    def mark_complete(self):
        """Flag the scenario as completed and persist the flag."""
        self.scenario['fields']['isComplete'] = True
        scenario_airtable.update(self.scenario['id'], {'isComplete': True})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Scenario {self.number}: {self.name} complete")

    def update_description(self, description):
        """Replace the stored scenario description."""
        self.description = description
        scenario_airtable.update(self.scenario['id'], {'description': description})
        print(f"[Isaacbot Logger]--{datetime.now()}--Scenario {self.number} description added -- '{description}'")

    def update_outcome(self, outcome):
        """Replace the stored scenario outcome text."""
        self.outcome = outcome
        scenario_airtable.update(self.scenario['id'], {'outcome': self.outcome})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Scenario {self.number} outcome added -- '{outcome}'")
class Party:
    """Party-level state: members, reputation and the shop price modifier.

    Wraps a single record of the Parties table.
    """
    # absolute-reputation thresholds at which the shop discount/surcharge
    # steps up (+ or -); |reputation| >= 19 caps the modifier at 5 gold
    discount_levels = (0, 3, 7, 11, 15, 19)

    def __init__(self, party_rec_id):
        self.party_rec = party_airtable.get(party_rec_id)
        self.name = self.party_rec['fields']['name']
        self.members = self.party_rec['fields']['characters']
        self.reputation = self.party_rec['fields']['reputation']
        self.discount = self.discount_calc(self.reputation)
        self.achievements = self.party_rec['fields']['achievements']

    def discount_calc(self, reputation):
        """Return the gold price modifier for the given reputation.

        Negative result = discount (good reputation), positive = surcharge
        (bad reputation). Used by the buy action.
        Fixed: the original compared the *signed* reputation against the
        thresholds in the loop, so any negative reputation always produced
        a modifier of 0.
        """
        magnitude = abs(reputation)
        if magnitude >= self.discount_levels[-1]:
            discount = 5  # capped at the top tier
        else:
            for j in range(len(self.discount_levels) - 1):
                if self.discount_levels[j] <= magnitude < self.discount_levels[j + 1]:
                    discount = j
                    break
        # positive reputation lowers prices, negative raises them
        return -discount if reputation > 0 else discount

    def gain_reputation(self):
        """Increase reputation by one, refresh the discount and persist.

        Fixed: the original wrote the party record id into the Campaign
        table; reputation lives on the Parties table.
        """
        self.reputation += 1
        self.discount = self.discount_calc(self.reputation)
        party_airtable.update(self.party_rec['id'], {'reputation': self.reputation})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Gain Reputation....{self.name}, {self.reputation}")

    def lose_reputation(self):
        """Decrease reputation by one, refresh the discount and persist."""
        self.reputation -= 1
        self.discount = self.discount_calc(self.reputation)
        party_airtable.update(self.party_rec['id'], {'reputation': self.reputation})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Lose Reputation....{self.name}, {self.reputation}")

    def gain_achiev(self):
        """Not implemented yet: record a party achievement."""
        pass

    def lose_achiev(self):
        """Not implemented yet: remove a party achievement."""
        pass
class Character:
    """A single player character: xp/level, gold, checks, items, abilities.

    Wraps one record of the Characters table plus its linked class, item
    and ability records, looked up by the owner's Discord username.
    """
    # xp thresholds for character levels 1-9
    character_levels = (0, 45, 95, 150, 210, 275, 345, 420, 500)

    def __init__(self, author):
        self.character_rec = characters_airtable.match('discordUsername', author)  # returns dict
        self.party = self.character_rec['fields']['party']  # linked record id(s)
        self.campaign = self.character_rec['fields']['campaign']  # linked record id(s)
        self.name = self.character_rec['fields']['name']
        self.charclass = classes_airtable.get(self.character_rec['fields']['class'][0])['fields']['name']
        self.xp = self.character_rec['fields']['xp']
        self.lvl = self.lvl_calc()
        self.gold = self.character_rec['fields']['gold']
        self.checks = self.character_rec['fields']['checks']
        self.ch = self.check_calc()
        self.id = self.character_rec['id']
        try:
            self.items = self.character_rec['fields']['items']
        except KeyError:
            # no items yet: initialise the list locally and remotely
            self.items = []
            characters_airtable.update(self.character_rec['id'], {'items': self.items})
        finally:
            self.item_nums = sorted(items_airtable.get(a)['fields']['number'] for a in self.items)
        try:
            self.abilities = self.character_rec['fields']['abilities']
        except KeyError:
            self.abilities = []
            characters_airtable.update(self.character_rec['id'], {'abilities': self.abilities})
        self.abil_nums = sorted(abilities_airtable.get(a)['fields']['number'] for a in self.abilities)

    def retire(self, quest=''):
        """Retire the character, optionally recording the fulfilled quest."""
        characters_airtable.update(self.character_rec['id'], {'isActive': False, 'isRetired': True, 'quest': quest})

    def deactivate(self):
        """Detach the character from its Discord user and mark it inactive."""
        characters_airtable.update(self.id, {'discordUsername': '', 'isActive': False})

    def gain_xp(self, xp_gained):
        """Add xp_gained to the total, persist it; return True on level-up."""
        self.xp += xp_gained
        characters_airtable.update(self.character_rec['id'], {'xp': self.xp})
        print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} Gain {xp_gained}xp Total: {self.xp}xp")
        new_lvl = self.lvl_calc()
        leveled_up = new_lvl > self.lvl
        if leveled_up:
            print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} leveled up to Lvl {new_lvl}")
        # fixed: the original only ever bumped self.lvl by one, which left it
        # stale when a single gain crossed more than one threshold
        self.lvl = new_lvl
        return leveled_up

    def change_xp(self, new_xp):
        """Set xp to new_xp, persist it; return True if the level increased."""
        self.xp = new_xp
        characters_airtable.update(self.character_rec['id'], {'xp': self.xp})
        print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} Total: {self.xp}xp")
        new_lvl = self.lvl_calc()
        leveled_up = new_lvl > self.lvl
        if leveled_up:
            print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} leveled up to Lvl {new_lvl}")
        # fixed: the original never refreshed self.lvl here at all
        self.lvl = new_lvl
        return leveled_up

    def lvl_calc(self):
        """Map the current xp onto a level 1-9 via character_levels."""
        if self.xp >= self.character_levels[-1]:
            return len(self.character_levels)  # level 9 cap
        for i in range(len(self.character_levels) - 1):
            if self.character_levels[i] <= self.xp < self.character_levels[i + 1]:
                return i + 1  # return as soon as the bracket is found

    def gain_gold(self, gold_gained):
        """Add gold_gained (negative to spend/lose) and persist."""
        self.gold += gold_gained
        characters_airtable.update(self.character_rec['id'], {'gold': self.gold})
        print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} +{gold_gained}gp Total: {self.gold}gold")

    def change_gold(self, new_gold):
        """Set gold to new_gold and persist."""
        self.gold = new_gold
        characters_airtable.update(self.character_rec['id'], {'gold': self.gold})
        print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} Total: {self.gold}gold")

    def gain_checks(self, checks_gained):
        """Add checks_gained (negative to lose) and persist."""
        self.checks += checks_gained
        characters_airtable.update(self.character_rec['id'], {'checks': self.checks})
        print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} +{checks_gained} checks Total: {self.checks}checks")
        self.ch = self.check_calc()

    def change_checks(self, new_checks):
        """Set checks to new_checks and persist."""
        self.checks = new_checks
        characters_airtable.update(self.character_rec['id'], {'checks': self.checks})
        print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} Total: {self.checks}checks")
        self.ch = self.check_calc()

    def check_calc(self):
        """Cache and return the singular/plural word for self.checks."""
        self.ch = 'check' if self.checks == 1 else 'checks'
        return self.ch

    def level_up(self, abil_to_add):
        """Append new ability record ids (a list like ['recXXXX']) and persist."""
        self.abilities = self.abilities + list(abil_to_add)
        characters_airtable.update(self.character_rec['id'], {'abilities': self.abilities})

    def item_transaction(self, action, item_num):
        """Gain, lose or loot item item_num; refresh item_nums and persist."""
        item = Item(item_num)
        if action == 'gain':
            self.items.append(item.item_rec['id'])
            verb = 'gain'
        elif action == 'lose':
            self.items.remove(item.item_rec['id'])
            # fixed: the original log read "{self.name}e lose item"
            verb = 'lose'
        elif action == 'loot':
            self.items.append(item.item_rec['id'])
            verb = 'loot'
        self.item_nums = sorted(items_airtable.get(a)['fields']['number'] for a in self.items)
        print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} {verb} item {item.number}")
        characters_airtable.update(self.character_rec['id'], {'items': self.items})

    def abil_transaction(self, action, abil_num):
        """Gain or lose ability abil_num; refresh abil_nums and persist."""
        abil = Ability(abil_num)
        if action == 'gain':
            self.abilities.append(abil.ability['id'])
            self.abil_nums = sorted(abilities_airtable.get(a)['fields']['number'] for a in self.abilities)
            print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} gain abil {abil.number}, {self.abil_nums}")
        elif action == 'lose':
            self.abilities.remove(abil.ability['id'])
            self.abil_nums = sorted(abilities_airtable.get(a)['fields']['number'] for a in self.abilities)
            # fixed: the original logged the hard-coded name "Ghostface" here
            print(f"[Isaacbot Logger]--{datetime.now()}-- {self.name} remove abil {abil.number}, {self.abil_nums}")
        characters_airtable.update(self.character_rec['id'], {'abilities': self.abilities})
class Item:
    """One row of the Items table, looked up by item number.

    If the item's display fields are missing (still locked / no stock),
    falls back to unlocked=False with zero availability.
    """
    def __init__(self, item_num):
        self.item_rec = items_airtable.match('number', item_num)
        self.number = item_num
        self.level = self.item_rec['fields']['prosperityRequirement']
        # the broad except covers any missing field on a locked record
        try:
            self.unlocked = self.item_rec['fields']['isUnlocked']
            self.number = self.item_rec['fields']['number']
            self.name = self.item_rec['fields']['name']
            self.cost = self.item_rec['fields']['cost']
            self.text = self.item_rec['fields']['description']
            self.numberAvailable = self.item_rec['fields']['numberAvailable']
            self.maxCount = self.item_rec['fields']['maxCount']
            self.realMax = self.item_rec['fields']['realMax']
            self.owners = self.item_rec['fields']['characterCount']
            self.num_name = f"{self.number}: {self.name}"
            self.description = self.item_rec['fields']['description']
        except:
            # locked/unstocked item: most display fields are absent
            self.unlocked = False
            self.numberAvailable = 0
            self.maxCount = 0
            self.realMax = self.item_rec['fields']['realMax']

    def unlock_design(self):
        """Unlock the item design outright (all realMax copies become available).

        Triggered via gain_prosperity or looting a design.
        """
        self.unlocked = True
        self.maxCount = self.realMax
        update = {'isUnlocked': True, 'maxCount': self.maxCount}
        items_airtable.update(self.item_rec['id'], update)
        print(f"[Isaacbot Logger]--{datetime.now()}-- Item Design {self.number} unlocked")

    def unlock_loot(self):
        """Unlock the item and add one copy (capped at realMax); via loot design."""
        self.unlocked = True
        if self.maxCount < self.realMax:
            self.maxCount += 1
        items_airtable.update(self.item_rec['id'], {'isUnlocked': self.unlocked, 'maxCount': self.maxCount})
        print(f"[Isaacbot Logger]--{datetime.now()}-- Item {self.number} looted.")
class Ability:
    """Read-only wrapper around one row of the 'Character Abilities' table."""

    def __init__(self, abil_num):
        # look the ability up by its printed card number
        record = abilities_airtable.match('number', abil_num)
        self.ability = record
        self.number = abil_num
        fields = record['fields']
        self.lvl = fields['levelRequired']
        # resolve the linked class record to its display name
        self.charclass = classes_airtable.get(fields['class'][0])['fields']['name']
        self.name = fields['name']
        self.num_name = f"Lvl {self.lvl} -- {self.name}"
# NOTE(review): stray import-time side effect -- this prints every time the
# module is imported; presumably a leftover startup/debug marker. TODO confirm
# it is wanted before removing.
print('done')
| 41.245614 | 129 | 0.621969 | 17,729 | 0.942631 | 0 | 0 | 0 | 0 | 0 | 0 | 5,484 | 0.291578 |
892320f63f5d2a0ca42e3afab52c7bbe19958d5f | 13,620 | py | Python | bilbyweb/utility/job.py | ASVO-TAO/SS18B-PLasky | a3c13b05f894fb8cebd5be381c170b8a78adb81a | [
"MIT"
] | null | null | null | bilbyweb/utility/job.py | ASVO-TAO/SS18B-PLasky | a3c13b05f894fb8cebd5be381c170b8a78adb81a | [
"MIT"
] | null | null | null | bilbyweb/utility/job.py | ASVO-TAO/SS18B-PLasky | a3c13b05f894fb8cebd5be381c170b8a78adb81a | [
"MIT"
] | null | null | null | """
Distributed under the MIT License. See LICENSE.txt for more info.
"""
import json
import uuid
from ..utility.display_names import (
OPEN_DATA,
SIMULATED_DATA,
BINARY_BLACK_HOLE,
DYNESTY,
NESTLE,
EMCEE,
FIXED,
UNIFORM,
SKIP,
SUBMITTED,
QUEUED,
IN_PROGRESS,
DRAFT,
COMPLETED,
PENDING,
ERROR,
CANCELLED,
WALL_TIME_EXCEEDED,
OUT_OF_MEMORY,
PUBLIC,
)
from ..models import (
Job,
Data,
Signal,
SignalParameter,
DataParameter,
Prior,
Sampler,
SamplerParameter,
)
from ..forms.signal.signal_parameter import BBH_FIELDS_PROPERTIES
from ..forms.data.data_open import DATA_FIELDS_PROPERTIES as OPEN_DATA_FIELDS_PROPERTIES
from ..forms.data.data_simulated import DATA_FIELDS_PROPERTIES as SIMULATED_DATA_FIELDS_PROPERTIES
from ..forms.sampler.sampler_dynesty import DYNESTY_FIELDS_PROPERTIES
from ..forms.sampler.sampler_nestle import NESTLE_FIELDS_PROPERTIES
from ..forms.sampler.sampler_emcee import EMCEE_FIELDS_PROPERTIES
def clone_job_data(from_job, to_job):
    """
    Copy job data across two jobs
    :param from_job: instance of Job that will be used as a source
    :param to_job: instance of Job that will be used as a target
    :return: Nothing

    Copies the Data (+DataParameter), Signal (+SignalParameter), Prior and
    Sampler (+SamplerParameter) rows attached to from_job onto to_job.
    A section missing on the source job (DoesNotExist) is simply skipped.
    NOTE(review): assumes to_job has no existing rows of these models --
    confirm callers only pass freshly created jobs.
    """
    # cloning data and data parameters
    try:
        from_data = Data.objects.get(job=from_job)
        data_created = Data.objects.create(
            job=to_job,
            data_choice=from_data.data_choice,
        )
    except Data.DoesNotExist:
        pass
    else:
        # creating the data parameters
        data_parameters = DataParameter.objects.filter(data=from_data)
        for data_parameter in data_parameters:
            DataParameter.objects.create(
                data=data_created,
                name=data_parameter.name,
                value=data_parameter.value,
            )
    # cloning signal and signal parameters
    try:
        from_signal = Signal.objects.get(job=from_job)
        signal_created = Signal.objects.create(
            job=to_job,
            signal_choice=from_signal.signal_choice,
            signal_model=from_signal.signal_model,
        )
    except Signal.DoesNotExist:
        pass
    else:
        # creating the signal parameters
        signal_parameters = SignalParameter.objects.filter(signal=from_signal)
        for signal_parameter in signal_parameters:
            SignalParameter.objects.create(
                signal=signal_created,
                name=signal_parameter.name,
                value=signal_parameter.value,
            )
    # populating prior (priors have no child parameter rows)
    priors = Prior.objects.filter(job=from_job)
    for prior in priors:
        Prior.objects.create(
            job=to_job,
            name=prior.name,
            prior_choice=prior.prior_choice,
            fixed_value=prior.fixed_value,
            uniform_min_value=prior.uniform_min_value,
            uniform_max_value=prior.uniform_max_value,
        )
    # cloning sampler and sampler parameters
    try:
        from_sampler = Sampler.objects.get(job=from_job)
        sampler_created = Sampler.objects.create(
            job=to_job,
            sampler_choice=from_sampler.sampler_choice,
        )
    except Sampler.DoesNotExist:
        pass
    else:
        # creating the sampler parameters
        sampler_parameters = SamplerParameter.objects.filter(sampler=from_sampler)
        for sampler_parameter in sampler_parameters:
            SamplerParameter.objects.create(
                sampler=sampler_created,
                name=sampler_parameter.name,
                value=sampler_parameter.value,
            )
class BilbyJob(object):
    """
    Class representing a Bilby Job. The bilby job parameters are scattered in different models in the database.
    This class used to collects the correct job parameters in one place. It also defines the json representation
    of the job.

    Note: __new__ returns None when no Job with the given id exists, so
    callers must check the constructor's result before use.
    """
    # variable to hold the Job model instance
    job = None
    # variable to hold the Data model instance
    data = None
    # list to hold the Data Parameters instances
    data_parameters = None
    # variable to hold the Signal instance
    signal = None
    # list to hold the Signal Parameters instances
    signal_parameters = None
    # list to hold the Prior instances
    priors = None
    # variable to hold the Sampler instance
    sampler = None
    # list to hold the Sampler Parameters instances
    sampler_parameters = None
    # what actions a user can perform on this job
    job_actions = None

    def clone_as_draft(self, user):
        """
        Clones the bilby job for the user as a Draft Job
        :param user: the owner of the new Draft Job
        :return: the newly created Job, or None when a unique name cannot be
                 generated (returns None implicitly when self.job is unset)
        """
        if not self.job:
            return
        # try to generate a unique name for the job owner
        name = self.job.name
        while Job.objects.filter(user=user, name=name).exists():
            name = (self.job.name + '_' + uuid.uuid4().hex)[:255]
            # This will be true if the job has 255 Characters in it,
            # In this case, we cannot get a new name by adding something to it.
            # This can be altered later based on the requirement.
            if name == self.job.name:
                # cannot generate a new name, returning none
                return None
        # Once the name is set, creating the draft job with new name and owner and same description
        cloned = Job.objects.create(
            name=name,
            user=user,
            description=self.job.description,
        )
        # copying other parameters of the job
        clone_job_data(self.job, cloned)
        return cloned

    def list_actions(self, user):
        """
        List the actions a user can perform on this Job
        :param user: User for whom the actions will be generated
        :return: Nothing (populates self.job_actions in place)
        """
        self.job_actions = []
        # Job Owners and Admins get most actions
        if self.job.user == user or user.is_admin():
            # any job can be copied
            self.job_actions.append('copy')
            # job can only be deleted if in the following status:
            # 1. draft
            # 2. completed
            # 3. error (wall time and out of memory)
            # 4. cancelled
            # 5. public
            if self.job.status in [DRAFT, COMPLETED, ERROR, CANCELLED, WALL_TIME_EXCEEDED, OUT_OF_MEMORY, PUBLIC]:
                self.job_actions.append('delete')
            # edit a job if it is a draft
            if self.job.status in [DRAFT]:
                self.job_actions.append('edit')
            # cancel a job if it is not finished processing
            if self.job.status in [PENDING, SUBMITTED, QUEUED, IN_PROGRESS]:
                self.job_actions.append('cancel')
            # completed job can be public and vice versa
            if self.job.status in [COMPLETED]:
                self.job_actions.append('make_it_public')
            elif self.job.status in [PUBLIC]:
                self.job_actions.append('make_it_private')
        else:
            # non admin and non owner can copy a PUBLIC job
            if self.job.status in [PUBLIC]:
                self.job_actions.append('copy')

    def __init__(self, job_id, light=False):
        """
        Initialises the Bilby Job
        :param job_id: id of the job
        :param light: Whether used for only job variable to be initialised atm
        """
        # self.job itself is resolved in __new__ below
        # do not need to do further processing for light bilby jobs
        # it is used only for status check mainly from the model itself to list the
        # actions a user can do on the job
        if light:
            return
        # populating data tab information
        try:
            self.data = Data.objects.get(job=self.job)
        except Data.DoesNotExist:
            pass
        else:
            self.data_parameters = []
            # finding the correct data parameters for the data type
            all_data_parameters = DataParameter.objects.filter(data=self.data)
            if self.data.data_choice == OPEN_DATA:
                for name in OPEN_DATA_FIELDS_PROPERTIES.keys():
                    self.data_parameters.append(all_data_parameters.get(name=name))
            elif self.data.data_choice == SIMULATED_DATA:
                for name in SIMULATED_DATA_FIELDS_PROPERTIES.keys():
                    self.data_parameters.append(all_data_parameters.get(name=name))
        # populating signal tab information
        try:
            self.signal = Signal.objects.get(job=self.job)
        except Signal.DoesNotExist:
            pass
        else:
            self.signal_parameters = []
            self.priors = []
            # finding the correct signal parameters for the signal type
            all_signal_parameters = SignalParameter.objects.filter(signal=self.signal)
            if self.signal.signal_choice == BINARY_BLACK_HOLE:
                for name in BBH_FIELDS_PROPERTIES.keys():
                    self.signal_parameters.append(all_signal_parameters.get(name=name))
        # populating prior
        # self.priors = Prior.objects.filter(job=self.job)
        # would be suffice if ordering is not required
        # however for displaying the fields in order the following have been added
        all_priors = Prior.objects.filter(job=self.job)
        if all_priors.exists():
            if self.signal and self.signal.signal_model == BINARY_BLACK_HOLE:
                for name in BBH_FIELDS_PROPERTIES.keys():
                    self.priors.append(all_priors.get(name=name))
        # populating sampler tab information
        try:
            self.sampler = Sampler.objects.get(job=self.job)
        except Sampler.DoesNotExist:
            pass
        else:
            self.sampler_parameters = []
            # finding the correct sampler parameters for the sampler type
            all_sampler_parameters = SamplerParameter.objects.filter(sampler=self.sampler)
            if self.sampler.sampler_choice == DYNESTY:
                for name in DYNESTY_FIELDS_PROPERTIES.keys():
                    self.sampler_parameters.append(all_sampler_parameters.get(name=name))
            elif self.sampler.sampler_choice == NESTLE:
                for name in NESTLE_FIELDS_PROPERTIES.keys():
                    self.sampler_parameters.append(all_sampler_parameters.get(name=name))
            elif self.sampler.sampler_choice == EMCEE:
                for name in EMCEE_FIELDS_PROPERTIES.keys():
                    self.sampler_parameters.append(all_sampler_parameters.get(name=name))

    def __new__(cls, *args, **kwargs):
        """
        Instantiate the Bilby Job
        :param args: arguments
        :param kwargs: keyword arguments
        :return: Instance of Bilby Job with job variable initialised from job_id if exists
        otherwise returns None
        """
        result = super(BilbyJob, cls).__new__(cls)
        try:
            result.job = Job.objects.get(id=kwargs.get('job_id', None))
        except Job.DoesNotExist:
            # returning None from __new__ means __init__ is never called
            return None
        return result

    def as_json(self):
        """
        Generates the json representation of the Bilby Job so that Bilby Core can digest it
        :return: Json Representation
        """
        # processing data dict
        data_dict = dict()
        if self.data:
            data_dict.update({
                'type': self.data.data_choice,
            })
            for data_parameter in self.data_parameters:
                data_dict.update({
                    data_parameter.name: data_parameter.value,
                })
        # processing signal dict
        signal_dict = dict()
        if self.signal and self.signal.signal_choice != SKIP:
            signal_dict.update({
                'type': self.signal.signal_choice,
            })
            for signal_parameter in self.signal_parameters:
                signal_dict.update({
                    signal_parameter.name: signal_parameter.value,
                })
        # processing prior dict
        priors_dict = dict()
        if self.priors:
            for prior in self.priors:
                prior_dict = dict()
                prior_dict.update({
                    'type': prior.prior_choice,
                })
                if prior.prior_choice == FIXED:
                    prior_dict.update({
                        'value': prior.fixed_value,
                    })
                elif prior.prior_choice == UNIFORM:
                    prior_dict.update({
                        'min': prior.uniform_min_value,
                        'max': prior.uniform_max_value,
                    })
                priors_dict.update({
                    prior.name: prior_dict,
                })
        # processing sampler dict
        sampler_dict = dict()
        if self.sampler:
            sampler_dict.update({
                'type': self.sampler.sampler_choice,
            })
            for sampler_parameter in self.sampler_parameters:
                sampler_dict.update({
                    sampler_parameter.name: sampler_parameter.value,
                })
        # accumulating all in one dict
        json_dict = dict(
            name=self.job.name,
            description=self.job.description,
            data=data_dict,
            signal=signal_dict,
            priors=priors_dict,
            sampler=sampler_dict,
        )
        # returning json with correct indentation
        return json.dumps(json_dict, indent=4)
| 33.880597 | 114 | 0.602423 | 9,953 | 0.730764 | 0 | 0 | 0 | 0 | 0 | 0 | 3,683 | 0.270411 |
89287107bf8b41baf6a2e18560e187efeb94ad6b | 574 | py | Python | activities/migrations/0003_alter_currentactivitie_importance_level.py | CiganOliviu/MyWorkflow | 85951c2e8ebdb3e970fcc0b3e24bd319360b852a | [
"Apache-2.0"
] | null | null | null | activities/migrations/0003_alter_currentactivitie_importance_level.py | CiganOliviu/MyWorkflow | 85951c2e8ebdb3e970fcc0b3e24bd319360b852a | [
"Apache-2.0"
] | null | null | null | activities/migrations/0003_alter_currentactivitie_importance_level.py | CiganOliviu/MyWorkflow | 85951c2e8ebdb3e970fcc0b3e24bd319360b852a | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.5 on 2021-07-04 19:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (see header): replaces the choice set and
    # default of CurrentActivitie.importance_level.

    dependencies = [
        ('activities', '0002_rename_currentactivity_currentactivitie'),
    ]

    operations = [
        migrations.AlterField(
            model_name='currentactivitie',
            name='importance_level',
            # choices are (stored_value, human_readable) pairs; the default
            # 'None' is a literal string, not Python None.
            field=models.CharField(choices=[('Optional', 'Optional'), ('Important', 'Important'), ('Very important', 'Very important'), ('Critical', 'Critical')], default='None', max_length=25),
        ),
    ]
| 30.210526 | 194 | 0.642857 | 481 | 0.837979 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.419861 |
8928910327e7aadda89a6e0cb4b8070d90c7134b | 1,209 | py | Python | src/collector/wechat/wechat_utils.py | AI-xiaofour/2c | 4aa40eb18868ea5eccf7b09b7e64939fb505a58c | [
"Apache-2.0"
] | null | null | null | src/collector/wechat/wechat_utils.py | AI-xiaofour/2c | 4aa40eb18868ea5eccf7b09b7e64939fb505a58c | [
"Apache-2.0"
] | null | null | null | src/collector/wechat/wechat_utils.py | AI-xiaofour/2c | 4aa40eb18868ea5eccf7b09b7e64939fb505a58c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Created by howie.hu at 2021/4/9.
Description:微信相关通用函数
Changelog: all notable changes to this file will be documented
"""
from ruia import Request
from src.config import Config
from src.databases import MongodbManager
async def get_wf_url():
    """
    Resolve the current wechat-feeds resource URL.

    Fetches the published version tag, then builds the CDN URL of the
    matching details.json.

    Github: https://github.com/hellodword/wechat-feeds
    :return: URL string of the versioned details.json on jsDelivr
    """
    version_endpoint = "https://wechat.privacyhide.com/VERSION?"
    response = await Request(url=version_endpoint).fetch()
    tag = str(await response.text()).strip()
    return f"https://cdn.jsdelivr.net/gh/hellodword/wechat-feeds@{tag}/details.json"
def wechat2url(name_list: list):
    """
    Map WeChat account names to their wechat-feeds RSS URLs.

    :param name_list: WeChat account names to look up
    :return: dict of ``name -> RSS feed URL`` for every name found in the
             ``2c_wechat_name`` collection
    """
    mongo_base = MongodbManager.get_mongo_base(mongodb_config=Config.MONGODB_CONFIG)
    coll = mongo_base.get_collection(coll_name="2c_wechat_name")
    url_template = "https://gitee.com/BlogZ/wechat-feeds/raw/feeds/{0}.xml"
    return {
        record["name"]: url_template.format(record["bizid"])
        for record in coll.find({"name": {"$in": name_list}})
    }
if __name__ == "__main__":
    # Manual smoke test; the returned mapping is discarded.
    wechat2url(Config.WECHAT_LIST)
| 27.477273 | 88 | 0.682382 | 0 | 0 | 0 | 0 | 0 | 0 | 372 | 0.295473 | 613 | 0.486894 |
892b0a91946a1df7a1b4680a474c70df84c8c932 | 857 | py | Python | tests/v2/test_team_response_attributes.py | anbnyc/datadog-api-client-python | 162bd0c6f2523a809aec08a3197e85dc74b78c21 | [
"Apache-2.0"
] | null | null | null | tests/v2/test_team_response_attributes.py | anbnyc/datadog-api-client-python | 162bd0c6f2523a809aec08a3197e85dc74b78c21 | [
"Apache-2.0"
] | null | null | null | tests/v2/test_team_response_attributes.py | anbnyc/datadog-api-client-python | 162bd0c6f2523a809aec08a3197e85dc74b78c21 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import sys
import unittest
import datadog_api_client.v2
from datadog_api_client.v2.model.team_response_attributes import TeamResponseAttributes
class TestTeamResponseAttributes(unittest.TestCase):
    """TeamResponseAttributes unit test stubs"""

    def setUp(self):
        # No fixtures needed while the test below is still a stub.
        pass

    def tearDown(self):
        # Nothing to tear down.
        pass

    def testTeamResponseAttributes(self):
        """Test TeamResponseAttributes"""
        # FIXME: construct object with mandatory attributes with example values
        # model = TeamResponseAttributes() # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
| 25.969697 | 108 | 0.733956 | 409 | 0.477246 | 0 | 0 | 0 | 0 | 0 | 0 | 450 | 0.525088 |
892b69c07a92d99e6dd2a8d72d712a862b76cd1f | 442 | py | Python | tests/unit/test_utility_item.py | luiscape/hdxscraper-worldpop | a94ea98d62645a0f2f508f227a642e7168e7281a | [
"MIT"
] | null | null | null | tests/unit/test_utility_item.py | luiscape/hdxscraper-worldpop | a94ea98d62645a0f2f508f227a642e7168e7281a | [
"MIT"
] | 2 | 2016-03-08T15:36:27.000Z | 2016-03-08T15:36:52.000Z | tests/unit/test_utility_item.py | luiscape/hdxscraper-worldpop | a94ea98d62645a0f2f508f227a642e7168e7281a | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Unit tests for the item() utility function.
'''
import unittest
from collector.utilities.item import item
class TestUtilityItem(unittest.TestCase):
    '''
    Tests for the item() utility function.
    '''

    def test_item_returns_correct_type(self):
        '''
        item() must return exactly a str (not a subclass) for 'bullet'.
        '''
        value = item('bullet')
        self.assertIs(type(value), str)
| 21.047619 | 66 | 0.678733 | 286 | 0.647059 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.515837 |
892b8c8a1447b39b4b49a43ec0b4a891164798d6 | 1,696 | py | Python | open/core/scripts/writeup_profile_prompt_generate_view.py | awesome-archive/open | 12fb6267c7d1a7e1f1c08f6112073bb0739e7ed9 | [
"MIT"
] | 105 | 2019-06-01T08:34:47.000Z | 2022-03-15T11:48:36.000Z | open/core/scripts/writeup_profile_prompt_generate_view.py | awesome-archive/open | 12fb6267c7d1a7e1f1c08f6112073bb0739e7ed9 | [
"MIT"
] | 111 | 2019-06-04T15:34:14.000Z | 2022-03-12T21:03:20.000Z | open/core/scripts/writeup_profile_prompt_generate_view.py | awesome-archive/open | 12fb6267c7d1a7e1f1c08f6112073bb0739e7ed9 | [
"MIT"
] | 26 | 2019-09-04T06:06:12.000Z | 2022-01-03T03:40:11.000Z | # flake8: noqa
import json
import time
import requests
from django.conf import settings
from websocket import create_connection
from open.core.scripts.swarm_ml_services import get_random_prompt
from open.core.writeup.constants import TEXT_GENERATION_URL
"""
this script's design was to compare performance behind django channels
and how much overhead it added versus directly hitting the microservice
output:
1.8620352506637574 was the average time in seconds to run.
1.8132854890823364 was the average time in seconds to run directly.
amazingly enough, django channels ... has almost zero overhead wow.
"""
def run():
    # dpy runscript writeup_profile_prompt_generate_view
    """
    Benchmark text generation through the Django Channels websocket and
    directly against the microservice API, printing the average latency
    of each path over ``intervals`` requests.
    """
    url = f"wss://open.senrigan.io/ws/async/writeup/{TEXT_GENERATION_URL}/session/a-cool-test-session/"
    ws = create_connection(url)
    intervals = 50
    start = time.time()
    try:
        for _ in range(intervals):
            data = get_random_prompt()
            ws_msg = json.dumps(data)
            ws.send(ws_msg)
            ws.recv()  # wait for the generated text so the round trip is timed
        end = time.time()
    finally:
        # fix: the websocket was previously never closed (connection leak)
        ws.close()

    websocket_difference = end - start
    print(f"{websocket_difference/intervals} was the average time in seconds to run.")

    url = settings.GPT2_MEDUM_API_ENDPOINT
    token_key = f"Token {settings.ML_SERVICE_ENDPOINT_API_KEY}"
    headers = {"Authorization": token_key}

    api_start = time.time()
    for _ in range(intervals):
        data = get_random_prompt()
        response = requests.post(url, json=data, headers=headers)
        assert response.status_code == 200
    api_end = time.time()

    api_difference = api_end - api_start
    print(
        f"{api_difference / intervals} was the average time in seconds to run directly."
    )
| 28.266667 | 103 | 0.723467 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 732 | 0.431604 |
892bb5ac486bc86c9bf63c89f4326c4ac07e5e22 | 1,031 | py | Python | utils/whatsthespotprice.py | New-Zealand-Zero/PowerMaker | df1866a838cb4b0bd1629417bf5f5111f60f1cff | [
"Apache-2.0"
] | null | null | null | utils/whatsthespotprice.py | New-Zealand-Zero/PowerMaker | df1866a838cb4b0bd1629417bf5f5111f60f1cff | [
"Apache-2.0"
] | null | null | null | utils/whatsthespotprice.py | New-Zealand-Zero/PowerMaker | df1866a838cb4b0bd1629417bf5f5111f60f1cff | [
"Apache-2.0"
] | 2 | 2021-12-14T01:01:20.000Z | 2021-12-21T07:48:50.000Z | import http.client, urllib.request, urllib.parse, urllib.error, base64, time, json, ast, datetime, math, keys
import logging
logging.basicConfig(filename='io.log', level=logging.INFO, format='%(asctime)s %(message)s')
headers = {
    # Request headers
    # EMI API subscription key; kept out of source control in keys.py.
    'Ocp-Apim-Subscription-Key': keys.OCP_APIM_SUBSCRIPTION_KEY,
}
def get_spot_price():
    """Fetch the current CML0331 spot price from the EMI real-time-prices API.

    :return: price in dollars per kWh (the API reports $/MWh; divided by 1000)
    """
    now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")
    # print(now)
    # NOTE(review): the '&filter' key looks like it was meant to be part of a
    # single $filter expression — confirm against the EMI API query syntax.
    params = urllib.parse.urlencode({
        '$filter': 'PointOfConnectionCode eq \'CML0331\'',
        '&filter': 'TradingDate eq datetime'+now+''
    })
    conn = http.client.HTTPSConnection('emi.azure-api.net')
    try:
        conn.request("GET", "/real-time-prices/?%s" % params, "{body}", headers)
        response = conn.getresponse()
        data = response.read()
    finally:
        # fix: the connection was previously left open when the request or
        # the read raised; close it unconditionally.
        conn.close()
    json_data = json.loads(data.decode('utf-8'))
    value = json_data[0]['DollarsPerMegawattHour']/1000
    logging.info("SPOT PRICE:$%s", value)
    return value
# Poll and print the spot price once a minute, forever.
while True:  # idiomatic Python; ``while(1)`` is a C-ism
    print(get_spot_price())
    time.sleep(60)
892d73c842c7dab435a1f0f69bb5c20db75d44dd | 518 | py | Python | gen/argo/events/client/__about__.py | argoproj-labs/argo-events-client-python | 3d6e3dffca4a12a490c2963f4ac90c8894948bb5 | [
"Apache-2.0"
] | null | null | null | gen/argo/events/client/__about__.py | argoproj-labs/argo-events-client-python | 3d6e3dffca4a12a490c2963f4ac90c8894948bb5 | [
"Apache-2.0"
] | null | null | null | gen/argo/events/client/__about__.py | argoproj-labs/argo-events-client-python | 3d6e3dffca4a12a490c2963f4ac90c8894948bb5 | [
"Apache-2.0"
] | null | null | null | """About this package."""
# Public names re-exported by this metadata module.
__all__ = [
    "__title__",
    "__summary__",
    "__uri__",
    "__version__",
    "__author__",
    "__email__",
    "__license__",
    "__copyright__",
]

# Distribution metadata consumed by packaging/setup code and the package
# ``__init__``.
__title__ = "argo-events"
__summary__ = "Community Maintained Python client for Argo Events"
__uri__ = "https://github.com/argoproj-labs/argo-events-client-python"
__version__ = "v0.1.0"
__author__ = "Yudi Xue"
__email__ = "binarycrayon@gmail.com"
__license__ = "Apache2"
__copyright__ = "Copyright 2020 {0}".format(__author__)
| 20.72 | 70 | 0.685328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 318 | 0.6139 |
892ea50cc3da67caa05d44561bab96944c809088 | 2,465 | py | Python | mfctracker/management/commands/syncusers.py | kevans91/mfctracker | c86f1538229df126081331edef3bfd19c9ef1345 | [
"BSD-2-Clause"
] | 3 | 2016-10-19T05:01:31.000Z | 2019-06-06T18:20:11.000Z | mfctracker/management/commands/syncusers.py | kevans91/mfctracker | c86f1538229df126081331edef3bfd19c9ef1345 | [
"BSD-2-Clause"
] | 3 | 2017-11-28T17:31:58.000Z | 2021-04-12T02:37:27.000Z | mfctracker/management/commands/syncusers.py | kevans91/mfctracker | c86f1538229df126081331edef3bfd19c9ef1345 | [
"BSD-2-Clause"
] | 1 | 2020-06-26T14:05:53.000Z | 2020-06-26T14:05:53.000Z | # Copyright (c) 2016-2019 Oleksandr Tymoshenko <gonzo@bluezbox.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.utils.crypto import get_random_string
from mfctracker.models import Commit, UserProfile
class Command(BaseCommand):
    """Create a Django user (and profile) for every committer seen in Commit history."""
    help = 'Create users for every known committer'

    def handle(self, *args, **options):
        # Distinct commit authors drive the set of accounts to ensure.
        committers = set(Commit.objects.values_list('author', flat=True).distinct())
        for committer in committers:
            try:
                user = User.objects.get(username=committer)
                # Existing user: backfill a profile if it is missing.
                # NOTE(review): a reverse one-to-one normally raises
                # RelatedObjectDoesNotExist rather than returning None —
                # confirm the semantics of ``user.profile`` here.
                if user.profile is None:
                    profile = UserProfile.objects.create(share_token=get_random_string(length=8), user=user)
                    profile.save()
            except User.DoesNotExist:
                email = '{}@{}'.format(committer, settings.SVN_EMAIL_DOMAIN)
                # Random throwaway password; output text is unchanged.
                password = get_random_string(length=32)
                # fix: .format() was previously handed a stray second argument
                # (the password) that the single placeholder never rendered.
                self.stdout.write('User does not exist, adding: {}'.format(committer))
                User.objects.create_user(committer, email, password)
| 52.446809 | 108 | 0.723732 | 841 | 0.341176 | 0 | 0 | 0 | 0 | 0 | 0 | 1,443 | 0.585396 |
892ff412cb0abfd15e5fd8375b604f4b8bd19d90 | 399 | py | Python | Exercises/015.py | GuilhermeRds1921/Python3-Guanabara | 24cd85b076e1074a5602e54c420bcc8e70cc1854 | [
"MIT"
] | null | null | null | Exercises/015.py | GuilhermeRds1921/Python3-Guanabara | 24cd85b076e1074a5602e54c420bcc8e70cc1854 | [
"MIT"
] | null | null | null | Exercises/015.py | GuilhermeRds1921/Python3-Guanabara | 24cd85b076e1074a5602e54c420bcc8e70cc1854 | [
"MIT"
] | null | null | null | # Escreva um programa que pergunte a quantidade de Km
# percorridos por um carro alugado e a quantidade de dias pelos
# quais ele foi alugado. Calcule o preço a pagar, sabendo que o carro
# custa R$60 por dia e R$0.15 por Km rodado.
km = float(input("Quantos km percorreu?: "))
dia = int(input("Quantos dias ele foi alugado?: "))
print("O valor a ser pago é: R${:.2f}".format(km * 0.15 + dia * 60)) | 49.875 | 69 | 0.704261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 321 | 0.800499 |
893019fb8b63370b0487a73ea074042709b31d91 | 3,241 | py | Python | utils/make_package.py | j123123/llilc | 9d6a522deb6b0769e449f27a087a9d3a7ab2a255 | [
"MIT"
] | 1,712 | 2015-04-13T22:16:59.000Z | 2022-03-17T18:44:42.000Z | utils/make_package.py | j123123/llilc | 9d6a522deb6b0769e449f27a087a9d3a7ab2a255 | [
"MIT"
] | 770 | 2015-04-13T22:10:20.000Z | 2022-03-15T01:30:06.000Z | utils/make_package.py | j123123/llilc | 9d6a522deb6b0769e449f27a087a9d3a7ab2a255 | [
"MIT"
] | 206 | 2015-04-13T22:17:19.000Z | 2022-02-19T12:30:19.000Z | #!/usr/bin/env python
import sys
import argparse
import os
import subprocess
import platform
import io
import string
try:
# For Python >= 3.0
from urllib.request import urlopen
except ImportError:
# For Python < 3.0
from urllib2 import urlopen
import shutil
import stat
def run(args):
    """
    Download nuget.exe (if needed) into ``<target>/.nuget``, copy the nuspec
    (and optional json) next to it, then invoke ``nuget pack``.

    :param args: parsed namespace with ``target``, ``nuspec`` and ``json``
    :return: exit status of the nuget command
    :raises RuntimeError: on non-Windows platforms when mono is not installed
    """
    nugetFolder = os.path.join(args.target, ".nuget")
    print("\nEnsuring folder: %s" % nugetFolder)

    if not os.path.exists(nugetFolder):
        os.makedirs(nugetFolder)

    nugetExe = os.path.join(nugetFolder, "nuget.exe")
    if not os.path.exists(nugetExe):
        nugetOrg = "http://nuget.org/nuget.exe"
        print("Downloading... %s" % nugetOrg)
        response = urlopen(nugetOrg)
        # use a context manager so the file handle closes even on error
        with open(nugetExe, 'wb') as output:
            output.write(response.read())

    # Ensure it's executable
    st = os.stat(nugetExe)
    os.chmod(nugetExe, st.st_mode | stat.S_IEXEC)

    if sys.platform != "win32":
        # shutil.which can be used for python 3.3 or later, instead.
        # fix: monopath was previously unbound when mono was missing,
        # producing a NameError instead of the intended error message.
        monopath = None
        for mono in ["/usr/bin/mono", "/usr/local/bin/mono"]:
            if os.path.exists(mono):
                monopath = mono
        if not monopath:
            # fix: raising a plain string is a TypeError in Python 3
            raise RuntimeError("mono is required to run nuget.exe")
        nugetExe = monopath + " " + nugetExe

    nugetSpec = os.path.join(nugetFolder, os.path.basename(args.nuspec))
    if args.nuspec != nugetSpec:
        print("\nCopying " + args.nuspec + " to " + nugetSpec)
        shutil.copyfile(args.nuspec, nugetSpec)

    if args.json is not None:
        nugetJson = os.path.join(nugetFolder, os.path.basename(args.json))
        if args.json != nugetJson:
            print("\nCopying " + args.json + " to " + nugetJson)
            shutil.copyfile(args.json, nugetJson)

    nugetCommand = nugetExe + " pack " + nugetSpec \
        + " -NoPackageAnalysis -NoDefaultExcludes" \
        " -OutputDirectory %s" % nugetFolder
    ret = os.system(nugetCommand)
    return ret
def main(argv):
    """
    Parse command-line arguments and build the NuGet package.

    :param argv: argument list (without the program name)
    :return: 0 on success, the nuget exit status otherwise, or -3 for
             unknown or missing required arguments
    """
    parser = argparse.ArgumentParser(description=
        "Download nuget and run it to create a package using the given nuspec. " \
        "Example: make_package.py " \
        "--target f:\llilc-rel\\bin\Release " \
        "--nuspec f:\llilc\lib\ObjWriter\.nuget\Microsoft.Dotnet.ObjectWriter.nuspec",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument("--target", metavar="PATH",
                        default=None,
                        help="path to a target directory that contains files that will " \
                             "packaged")
    parser.add_argument("--nuspec", metavar="PATH",
                        default=None,
                        help="path to a nuspec file. This file is assumed to be under " \
                             "a child directory (.nuget) of the target by convetion")
    parser.add_argument("--json", metavar="PATH",
                        default=None,
                        help="path to a json file. This file is used to create " \
                             "a redirection package")

    args, unknown = parser.parse_known_args(argv)

    if unknown:
        print("Unknown argument(s): ", ", ".join(unknown))
        return -3

    # idiom: compare to None with ``is``; drop the dead returncode=0 binding
    if args.target is None:
        print("--target is not specified.")
        return -3

    if args.nuspec is None:
        print("--nuspec is not specified")
        return -3

    return run(args)
if __name__ == "__main__":
    # Propagate the package-build result as the process exit status.
    returncode = main(sys.argv[1:])
    sys.exit(returncode)
| 30.575472 | 81 | 0.650108 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,029 | 0.317299 |
8932628f1e0bc2d8c1fc917f3837c4fdac64e6f9 | 192 | py | Python | wireapp/wireapp/doctype/mpesa_payment/mpesa_payment.py | saleemdev/wireapp | 7d39d07391ddad23539cfdf38369082f708d7294 | [
"MIT"
] | null | null | null | wireapp/wireapp/doctype/mpesa_payment/mpesa_payment.py | saleemdev/wireapp | 7d39d07391ddad23539cfdf38369082f708d7294 | [
"MIT"
] | null | null | null | wireapp/wireapp/doctype/mpesa_payment/mpesa_payment.py | saleemdev/wireapp | 7d39d07391ddad23539cfdf38369082f708d7294 | [
"MIT"
] | null | null | null | # Copyright (c) 2021, Salim and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class MPESAPayment(Document):
    """Controller for the Mpesa Payment DocType.

    All behaviour is inherited from ``frappe.model.document.Document``;
    no custom hooks are defined yet.
    """
| 21.333333 | 49 | 0.791667 | 35 | 0.182292 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.5625 |
8932d8f09ea333a4cd978f80748034ddb383b8b6 | 3,292 | py | Python | Adventure Game.py | CodeMaster7000/Adventure-Game | 7ec5e39315f342d7a4cf442caa1b0c0947bb51b6 | [
"MIT"
] | null | null | null | Adventure Game.py | CodeMaster7000/Adventure-Game | 7ec5e39315f342d7a4cf442caa1b0c0947bb51b6 | [
"MIT"
] | null | null | null | Adventure Game.py | CodeMaster7000/Adventure-Game | 7ec5e39315f342d7a4cf442caa1b0c0947bb51b6 | [
"MIT"
] | null | null | null | import time
import random
def main():
    """Play one round of the interactive maze adventure.

    Reads choices from stdin and prints the story. Any unrecognised
    answer at the first prompt ends the game silently (unchanged
    behaviour from the original).
    """
    print("You are trying to find your way to the centre of a maze where there is a pot of gold!")
    time.sleep(2)
    print("What you don't know is that this is a dangerous maze with traps and hazards.")
    time.sleep(2)
    start = input("Do you want to enter the maze? (y/n)")
    if start == "y" or start == "Y" or start == "Yes" or start == "yes":
        print("Welcome to the maze.")
        print("You enter the maze...")
        time.sleep(2)
        print("You reach a opening in the wall and go through it...")
        time.sleep(2)
        print("You can go left (L) or right (R)")
        answer = input("Make your choice ... ")
        print("You chose", answer, "... what will happen? ")
        time.sleep(2)
        print("You turn a corner...")
        time.sleep(2)
        print("You take a few steps forward...")
        time.sleep(2)
        if answer == "R" or answer == "r" or answer == "right" or answer == "Right":
            print("...and fall down a trapdoor!")
            time.sleep(1)
            print("After falling down for few seconds, you arrived in a damp room with no light.")
            A = input("Do you want to explore the room? (Y/N)")
            if A == "Y" or A == "y" or A == "Yes" or A == "yes":
                print("Exploring. . .")
                time.sleep(2)
                print("You found a torch and two stones!")
                AN = input("Ignite the torch? (Y/N)")
                # fix: accept the same yes spellings as every other prompt
                # (previously only the exact string "Y" counted as yes).
                if AN == "Y" or AN == "y" or AN == "Yes" or AN == "yes":
                    print("Igniting the torch...")
                    time.sleep(2)
                    # One outcome is chosen at random: failure or success.
                    items = ["You failed to ignite the torch. After a few hours in the dark, you died due to hypothermia. No one could find your dead body.",
                             "You succeeded! With the torch, you found many pots of gold... But where is the way to escape?"]
                    ri = random.choice(items)
                    print(ri)
                else:
                    time.sleep(2)
                    print("After a few hours in dark, you died due to hypothermia. No one could find your dead body.")
            else:
                time.sleep(2)
                print("You were too afraid to move an inch in the dark, so you stayed... and never saw the sun again.")
        else:
            print('''...and see a beautiful grassy patch lined with trees!
            There is a pot of gold at the end!''')
            time.sleep(2)
            print("You run towards the pot of gold, shining under the bright sunshine...")
            time.sleep(2)
            print(". . .and find out that the pot of gold was an illusion.")
            time.sleep(2)
            print("The beautiful grassy patch was just a grey path covered with mold, and the trees were just tall torches!")
            time.sleep(3)
            print("And the bright sunshine was...")
            time.sleep(2)
            print("An enormous burning stone falling directly towards you! \n")
            time.sleep(2)
            print("Farewell, explorer. You encountered death!")
    # fix: accept "N"/"no"/"No" like the affirmative branch does
    # (previously only the exact lowercase "n" produced this message).
    elif start == "n" or start == "N" or start == "no" or start == "No":
        print("Well, well, you coward! Come back whenever you feel brave enough to start!")


main()
| 43.315789 | 156 | 0.526428 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,704 | 0.517618 |
89333b9df818e24672d6ad9bcfb169b6126774e6 | 2,081 | py | Python | jcpdstools/view/mainwidget.py | SHDShim/JCPDSTools | 4a4c1bd22a3f71c3285b5877d5d22b2702b7550c | [
"Apache-2.0"
] | 3 | 2020-08-06T09:26:00.000Z | 2021-08-10T14:13:58.000Z | jcpdstools/view/mainwidget.py | SHDShim/JCPDSTools | 4a4c1bd22a3f71c3285b5877d5d22b2702b7550c | [
"Apache-2.0"
] | null | null | null | jcpdstools/view/mainwidget.py | SHDShim/JCPDSTools | 4a4c1bd22a3f71c3285b5877d5d22b2702b7550c | [
"Apache-2.0"
] | null | null | null | import os
from PyQt5 import QtWidgets
from .qtd import Ui_MainWindow
from version import __version__
from utils import SpinBoxFixStyle
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """
    Main window of JCPDSTools, built from the pyuic5-generated layout.
    """

    def __init__(self, parent=None):
        # initialization of the superclass
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)  # setup the GUI --> function generated by pyuic5
        env = os.environ['CONDA_DEFAULT_ENV']
        self.setWindowTitle("JCPDSTools ver. " + str(__version__) + " on " + env)
        #
        self.build_ui()
        #self.connect_channel()

    def build_ui(self):
        """Apply post-setupUi tweaks: disable the Dioptas button and fix
        keyboard/arrow handling on all double spin boxes."""
        self.pushButton_WriteDioptasJCPDS.setDisabled(True)
        cell_param_boxes = (
            self.doubleSpinBox_CellParamA,
            self.doubleSpinBox_CellParamB,
            self.doubleSpinBox_CellParamC,
            self.doubleSpinBox_CellParamAlpha,
            self.doubleSpinBox_CellParamBeta,
            self.doubleSpinBox_CellParamGamma,
        )
        for box in cell_param_boxes:
            box.setKeyboardTracking(False)
            box.setStyle(SpinBoxFixStyle())  # fresh style instance per widget
        other_boxes = (
            self.doubleSpinBox_K0,
            self.doubleSpinBox_K0p,
            self.doubleSpinBox_MinDsp,
            self.doubleSpinBox_MinInt,
        )
        for box in other_boxes:
            box.setStyle(SpinBoxFixStyle())
            box.setKeyboardTracking(False)

    def closeEvent(self, event):
        self.deleteLater()
        event.accept()
| 42.469388 | 81 | 0.739068 | 1,943 | 0.933686 | 0 | 0 | 0 | 0 | 0 | 0 | 176 | 0.084575 |
8933583a90eda099c254b38c7fb555a1d3d870bb | 4,622 | py | Python | tests/unit_tests/test_tethys_services/test_admin.py | msouff/tethys | 45795d1e6561d5db8fddd838f4d1ae1d91dbb837 | [
"BSD-2-Clause"
] | 79 | 2015-10-05T13:13:28.000Z | 2022-02-01T12:30:33.000Z | tests/unit_tests/test_tethys_services/test_admin.py | msouff/tethys | 45795d1e6561d5db8fddd838f4d1ae1d91dbb837 | [
"BSD-2-Clause"
] | 542 | 2015-08-12T22:11:32.000Z | 2022-03-29T22:18:08.000Z | tests/unit_tests/test_tethys_services/test_admin.py | msouff/tethys | 45795d1e6561d5db8fddd838f4d1ae1d91dbb837 | [
"BSD-2-Clause"
] | 71 | 2016-01-16T01:03:41.000Z | 2022-03-31T17:55:54.000Z | import unittest
from unittest import mock
from django.utils.translation import ugettext_lazy as _
from tethys_services.models import DatasetService, SpatialDatasetService, WebProcessingService, PersistentStoreService
from tethys_services.admin import DatasetServiceForm, SpatialDatasetServiceForm, WebProcessingServiceForm,\
PersistentStoreServiceForm, DatasetServiceAdmin, SpatialDatasetServiceAdmin, WebProcessingServiceAdmin,\
PersistentStoreServiceAdmin
class TestTethysServicesAdmin(unittest.TestCase):
    """Unit tests for the tethys_services admin forms and ModelAdmin classes."""

    def setUp(self):
        self.expected_labels = {
            'public_endpoint': _('Public Endpoint')
        }

    def tearDown(self):
        pass

    def _verify_form(self, form_cls, model_cls, expected_fields, check_labels=True):
        # Shared assertions for every service ModelForm.
        form = form_cls(mock.MagicMock())
        self.assertEqual(model_cls, form.Meta.model)
        self.assertEqual(expected_fields, form.Meta.fields)
        self.assertTrue('password' in form.Meta.widgets)
        if check_labels:
            self.assertEqual(self.expected_labels, form.Meta.labels)

    def _verify_admin(self, admin_cls, form_cls, expected_fields):
        # Shared assertions for every service ModelAdmin.
        arg = mock.MagicMock()
        model_admin = admin_cls(arg, arg)
        self.assertEqual(form_cls, model_admin.form)
        self.assertEqual(expected_fields, model_admin.fields)

    def test_DatasetServiceForm(self):
        self._verify_form(
            DatasetServiceForm, DatasetService,
            ('name', 'engine', 'endpoint', 'public_endpoint', 'apikey', 'username', 'password'))

    def test_SpatialDatasetServiceForm(self):
        self._verify_form(
            SpatialDatasetServiceForm, SpatialDatasetService,
            ('name', 'engine', 'endpoint', 'public_endpoint', 'apikey', 'username', 'password'))

    def test_WebProcessingServiceForm(self):
        self._verify_form(
            WebProcessingServiceForm, WebProcessingService,
            ('name', 'endpoint', 'public_endpoint', 'username', 'password'))

    def test_PersistentStoreServiceForm(self):
        # This form declares no custom labels, so skip the label check.
        self._verify_form(
            PersistentStoreServiceForm, PersistentStoreService,
            ('name', 'engine', 'host', 'port', 'username', 'password'),
            check_labels=False)

    def test_DatasetServiceAdmin(self):
        self._verify_admin(
            DatasetServiceAdmin, DatasetServiceForm,
            ('name', 'engine', 'endpoint', 'public_endpoint', 'apikey', 'username', 'password'))

    def test_SpatialDatasetServiceAdmin(self):
        self._verify_admin(
            SpatialDatasetServiceAdmin, SpatialDatasetServiceForm,
            ('name', 'engine', 'endpoint', 'public_endpoint', 'apikey', 'username', 'password'))

    def test_WebProcessingServiceAdmin(self):
        self._verify_admin(
            WebProcessingServiceAdmin, WebProcessingServiceForm,
            ('name', 'endpoint', 'public_endpoint', 'username', 'password'))

    def test_PersistentStoreServiceAdmin(self):
        self._verify_admin(
            PersistentStoreServiceAdmin, PersistentStoreServiceForm,
            ('name', 'engine', 'host', 'port', 'username', 'password'))

    def test_admin_site_register(self):
        from django.contrib import admin
        registry = admin.site._registry
        registrations = (
            (DatasetService, DatasetServiceAdmin),
            (SpatialDatasetService, SpatialDatasetServiceAdmin),
            (WebProcessingService, WebProcessingServiceAdmin),
            (PersistentStoreService, PersistentStoreServiceAdmin),
        )
        for model_cls, admin_cls in registrations:
            self.assertIn(model_cls, registry)
            self.assertIsInstance(registry[model_cls], admin_cls)
| 43.603774 | 118 | 0.720251 | 4,152 | 0.898312 | 0 | 0 | 0 | 0 | 0 | 0 | 548 | 0.118563 |