content stringlengths 5 1.05M |
|---|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The python library to implement the Trimmed Match estimator.
See the tech details in https://ai.google/research/pubs/pub48448/.
"""
import dataclasses
from typing import List, Set
import warnings
import numpy as np
from scipy import stats
from trimmed_match.core.python import estimator_ext
# A class to report the Trimmed Match estimator for a fixed trim rate:
#   trim_rate: float
#   iroas: float
#   std_error: float
TrimAndError = estimator_ext.TrimAndError
# At least one pair must have absolute spend difference above this value
_MIN_SPEND_GAP = 1e-10
# This trim rate removes half of the data
RATE_TO_TRIM_HALF_DATA = 0.25
@dataclasses.dataclass
class Report:
  """Summary of a single Trimmed Match estimation.

  Attributes:
    estimate: float, point estimate.
    std_error: float, standard error.
    trim_rate: float, trim rate.
    confidence: float, confidence level.
    conf_interval_low: float, lower bound of the confidence interval.
    conf_interval_up: float, upper bound of the confidence interval.
    epsilons: List[float], difference of uninfluenced responses.
    trimmed_pairs_indices: Set[int], the indices of trimmed pairs.
    candidate_results: List[TrimAndError], result for each candidate trim rate.
  """
  estimate: float
  std_error: float
  trim_rate: float
  confidence: float
  conf_interval_low: float
  conf_interval_up: float
  epsilons: List[float]
  trimmed_pairs_indices: Set[int]
  candidate_results: List["TrimAndError"]

  def __str__(self) -> str:
    """Returns a humanized textual representation of the object."""
    summary_lines = [
        f"estimate={self.estimate:.5f},",
        f"std_error={self.std_error:.5f},",
        f"trim_rate={self.trim_rate:.5f},",
        f"confidence={self.confidence:.2f},",
        f"conf_interval_low={self.conf_interval_low:.5f},",
        f"conf_interval_up={self.conf_interval_up:.5f}",
    ]
    return "\n".join(summary_lines)
_VectorPerturb = lambda x, y: (x + y) * (1 + y)
class TrimmedMatch(object):
  """The TrimmedMatch estimator.

  Wraps the C++ extension estimator (estimator_ext.TrimmedMatch), adding
  input validation plus a tiny deterministic perturbation of the inputs that
  breaks ties, which the underlying order-statistics-based estimator needs.

  Example usage:
    delta_response = [1, 10, 3, 8]
    delta_spend = [1, 5, 2, 5]
    max_trim_rate = 0.25
    tm = TrimmedMatch(delta_response, delta_spend, max_trim_rate)
    report = tm.Report()
  """

  def __init__(self,
               delta_response: List[float],
               delta_spend: List[float],
               max_trim_rate: float = RATE_TO_TRIM_HALF_DATA):
    """Initializes the class.

    Args:
      delta_response: List[float], response difference between the treatment geo
        and the control geo for each pair.
      delta_spend: List[float], spend difference for each pair.
      max_trim_rate: float, default 0.25.

    Raises:
      ValueError: the lengths of delta_response and delta_spend differ, or
        max_trim_rate is negative.
    """

    def HasTies():
      """Checks if delta_spend or {theta[i,j]: i<j} has duplicated values.

      Note theta[i,j] is the ratio of (delta_response[i] - delta_response[j]) to
      (delta_spend[i] - delta_spend[j]), see Lemma 2 in
      https://arxiv.org/pdf/1908.02922.pdf.

      Returns:
        2 if ties exist in delta_spend
        1 if ties exist in thetaij but not in delta_spend
        0 otherwise.
      """
      dresponse = np.array(delta_response)
      dspend = np.array(delta_spend)
      # check ties in delta_spend
      dspend_has_ties = (len(np.unique(dspend)) != len(dspend))
      if not dspend_has_ties:
        # check ties in thetaij: the pairwise slopes; only the strict upper
        # triangle (i < j) is needed since theta is symmetric.
        delta2_response = dresponse[:, None] - dresponse[None, :]
        delta2_spend = dspend[:, None] - dspend[None, :]
        upper_indices = np.triu_indices(len(dresponse), k=1)
        thetaij = delta2_response[upper_indices] / delta2_spend[upper_indices]
        thetaij_has_ties = (len(np.unique(thetaij)) != len(thetaij))
        if not thetaij_has_ties:
          return 0
        else:
          warnings.warn("thetaij has ties! Breaking ties with perturbation.")
          return 1
      else:
        warnings.warn("delta_spend has ties! Breaking ties with perturbation.")
        return 2

    if len(delta_response) != len(delta_spend):
      raise ValueError("Lengths of delta_response and delta_spend differ.")
    if max_trim_rate < 0.0:
      raise ValueError("max_trim_rate is negative.")
    if np.max(np.abs(delta_spend)) < _MIN_SPEND_GAP:
      raise ValueError("delta_spends are all too close to 0!")
    self._max_trim_rate = max_trim_rate
    self._delta_response = delta_response
    self._delta_spend = delta_spend
    # adding small amount of non-linear perturbation to break potential ties.
    # c.f. Algorithm 1 in https://arxiv.org/pdf/1908.02922.pdf
    ties = HasTies()
    if ties == 0:
      # No ties: zero perturbation leaves the inputs exactly unchanged.
      perturb_dspend = perturb_dresponse = np.zeros(len(delta_response))
    else:
      # i**1.5 grows non-linearly in the pair index, so no two perturbed
      # pairwise slopes can coincide; centering keeps the sum near zero.
      perturb_dresponse = np.arange(len(delta_response))**1.5
      perturb_dresponse = perturb_dresponse - np.median(perturb_dresponse)
      if ties == 2:
        perturb_dspend = np.arange(len(delta_spend)) - len(delta_spend) * 0.5
      else:
        perturb_dspend = np.zeros(len(delta_response))
    # Scale perturbations down to machine epsilon: large enough to break
    # exact ties, far too small to move the estimate.
    perturb_dspend, perturb_dresponse = [
        perturb_dspend * np.finfo(float).eps,
        perturb_dresponse * np.finfo(float).eps
    ]
    self._tm = estimator_ext.TrimmedMatch(
        _VectorPerturb(np.array(delta_response), perturb_dresponse),
        _VectorPerturb(np.array(delta_spend), perturb_dspend),
        # Never trim so much that fewer than two pairs would remain.
        min(0.5 - 1.0 / len(delta_response), max_trim_rate))

  def _CalculateEpsilons(self, iroas: float) -> List[float]:
    """Calculates delta_response - delta_cost * iroas."""
    epsilons = []
    zip_two_deltas = zip(self._delta_response, self._delta_spend)
    for delta1, delta2 in zip_two_deltas:
      epsilons.append(delta1 - delta2 * iroas)
    return epsilons

  def Report(self, confidence: float = 0.80, trim_rate: float = -1.0) -> Report:
    """Reports the Trimmed Match estimation.

    Args:
      confidence: float, the confidence level for the two-sided confidence
        interval, default 0.8.
      trim_rate: float, trim rate, a value outside [0, max_trim_rate) triggers
        the data-driven choice described in the Trimmed Match paper.

    Returns:
      Report, as defined in the class Report above.

    Raises:
      ValueError: confidence is outside of (0, 1] or trim_rate > max_trim_rate.
    """
    if (confidence <= 0.0) | (confidence > 1.0):
      raise ValueError("Confidence is outside of (0, 1]")
    if trim_rate > self._max_trim_rate:
      raise ValueError(f"trim_rate {trim_rate} is greater than max_trim_rate "
                       f"which is {self._max_trim_rate}.")
    # The extension expects the two-sided normal quantile for the interval.
    output = self._tm.Report(stats.norm.ppf(0.5 + 0.5 * confidence), trim_rate)
    epsilons = self._CalculateEpsilons(output.estimate)
    # ranks[i] is the position of pair i when epsilons are sorted ascending.
    temp = np.array(epsilons).argsort()
    ranks = np.empty_like(temp)
    ranks[temp] = np.arange(len(temp))
    num_pairs = len(ranks)
    left_trim = np.ceil(num_pairs * output.trim_rate)
    # A pair is trimmed when its epsilon rank falls in either tail.
    trimmed_pairs_indices = set([
        i for i in np.arange(len(ranks))
        if (ranks[i] < left_trim) or (ranks[i] > num_pairs - left_trim - 1)
    ])
    return Report(output.estimate, output.std_error, output.trim_rate,
                  confidence, output.conf_interval_low, output.conf_interval_up,
                  epsilons, trimmed_pairs_indices, output.candidate_results)
|
"""
Hooks for customizing login with social providers
https://django-allauth.readthedocs.io/en/latest/advanced.html
"""
from allauth.account.signals import user_logged_in
from common.helpers.front_end import section_url
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
from allauth.account.adapter import DefaultAccountAdapter
from django.dispatch import receiver
from common.helpers.constants import FrontEndSection
from common.helpers.error_handlers import ReportableError
from common.helpers.s3 import copy_external_thumbnail_to_s3
from civictechprojects.models import ProjectFile, FileCategory
from democracylab.models import Contributor
from django.contrib.auth.models import User
from django.utils import timezone
import simplejson as json
class MissingOAuthFieldError(ReportableError):
    """Raised when an OAuth provider response lacks required fields.

    Args:
        message: explanation of the error to be reported in the logs.
        provider: the social provider whose response was incomplete.
        missing_fields: description of the fields that were absent.
    """

    def __init__(self, message, provider, missing_fields):
        details = {
            'provider': provider,
            'missing_fields': missing_fields,
        }
        super().__init__(message, details)
class MyAccountAdapter(DefaultAccountAdapter):
    """Account adapter that returns users to the page they came from."""

    def get_login_redirect_url(self, request):
        """Return the post-login URL, consuming any saved 'prev_page' state."""
        if 'prev_page' not in request.session:
            return section_url(FrontEndSection.Home)
        # Pop both keys so the saved destination is used only once.
        prev_page = request.session.pop('prev_page')
        prev_page_args = request.session.pop('prev_page_args')
        if prev_page.strip('/') == '':
            return '/'
        return section_url(prev_page, prev_page_args)
class SocialAccountAdapter(DefaultSocialAccountAdapter):
    """Social account adapter that links provider logins to Contributors."""

    def new_user(self, request, sociallogin):
        # extract_common_fields normalizes provider-specific field names;
        # emails are lower-cased so they match our usernames.
        email = sociallogin.account.get_provider().extract_common_fields(
            sociallogin.account.extra_data).get('email').lower()
        assert email
        # This account may actually belong to an existing user
        user = User.objects.filter(username=email).first()
        if user:
            # Preserve current password (sociallogin assigns an unusable
            # password); it is stashed in extra_data and restored in
            # pre_social_login below.
            if user.has_usable_password():
                sociallogin.account.extra_data.update(password=user.password)
            return Contributor.objects.get_by_natural_key(user.username)
        else:
            # First login with this email: the provider already vouched for
            # the address, so mark it verified.
            return Contributor(email_verified=True, last_login=timezone.now())

    def pre_social_login(self, request, sociallogin):
        """
        Invoked just after a user successfully authenticates via a
        social provider, but before the login is actually processed.
        You can use this hook to intervene, e.g. abort the login by
        raising an ImmediateHttpResponse
        """
        # standardizing fields across different providers
        provider = sociallogin.account.get_provider()
        data = provider.extract_common_fields(
            sociallogin.account.extra_data)
        full_name = data.get('name')
        first_name = data.get('first_name')
        last_name = data.get('last_name')
        if full_name or (first_name and last_name):
            # Fall back to splitting the full name when the provider does not
            # supply separate first/last fields.
            sociallogin.user.first_name = first_name or full_name.split()[0]
            sociallogin.user.last_name = last_name or ' '.join(full_name.split()[1:])
        # Set username to lowercase email
        sociallogin.user.username = sociallogin.user.email.lower()
        # Restore any password stashed by new_user() so linking a social
        # account does not clobber an existing credential.
        password = sociallogin.account.extra_data.get('password')
        if password:
            sociallogin.user.password = password
        if sociallogin.is_existing:
            sociallogin.user.save()  # Update only the user
            return
        # Upsert the User and the SocialAccount
        sociallogin.connect(request, sociallogin.user)
@receiver(user_logged_in)
def set_avatar_at_login(sender, sociallogin, **kwargs):
    """On social login, copy the provider avatar to S3 as the user's thumbnail."""
    contributor = sociallogin.user.contributor
    avatar_url = sociallogin.account.get_provider().get_avatar_url(sociallogin)
    if not avatar_url:
        return
    file_json = copy_external_thumbnail_to_s3(
        avatar_url, sociallogin.account.provider, contributor)
    ProjectFile.replace_single_file(
        contributor, FileCategory(file_json['file_category']), file_json)
|
#!/usr/bin/env python
# coding: latin-1
"""
CUBA example with delays.
"""
import sys, time, os
from brian2 import *
# Command line: <script> <standalone 0|1> <n_threads>
standalone = int(sys.argv[-2])
n_threads = int(sys.argv[-1])
# Per-thread-count output directory for the standalone project.
path = 'data_cuba_%d' %n_threads
if standalone == 1:
    set_device('cpp_standalone')
    brian_prefs.codegen.cpp_standalone.openmp_threads = n_threads
start = time.time()
# Network size: 20000 cells, 80% excitatory, 10% connection probability.
n_cells = 20000
n_exc = int(0.8*n_cells)
p_conn = 0.1
# CUBA model constants: membrane/synaptic time constants and key voltages.
taum = 20 * ms
taue = 5 * ms
taui = 10 * ms
Vt = -50 * mV
Vr = -60 * mV
El = -49 * mV
eqs = Equations('''
dv/dt = (ge+gi-(v-El))/taum : volt
dge/dt = -ge/taue : volt
dgi/dt = -gi/taui : volt
''')
P = NeuronGroup(n_cells, model=eqs, threshold='v>Vt', reset='v=Vr', refractory=5 * ms)
# Randomize initial membrane potentials between reset and threshold.
P.v = Vr + rand(len(P)) * (Vt - Vr)
P.ge = 0 * mV
P.gi = 0 * mV
Pe = P[0:n_exc]
Pi = P[n_exc:]
we = (60 * 0.27 / 10) # excitatory synaptic weight (voltage)
wi = (-20 * 4.5 / 10) # inhibitory synaptic weight
Se = Synapses(Pe, P, model = 'w : 1', pre = 'ge += w*mV')
Se.connect('i != j', p=p_conn)
Se.w = '%g' %(we)
# Random transmission delays up to 1 ms.
Se.delay ='rand()*ms'
Si = Synapses(Pi, P, model = 'w : 1', pre = 'gi += w*mV')
Si.connect('i != j', p=p_conn)
Si.w = '%g' %(wi)
Si.delay ='rand()*ms'
spike_mon = SpikeMonitor(P)
net = Network(P, Se, Si, spike_mon, name='stdp_net')
if standalone == 1:
    # Time the generated C++ main() itself, excluding Python-side overhead.
    device.insert_code('main', 'std::clock_t start = std::clock();')
net.run(10 * second, report='text')
if standalone == 1:
    # Write CPU time divided by thread count to speed.txt from the C++ side.
    device.insert_code('main', '''
std::ofstream myfile ("speed.txt");
if (myfile.is_open())
{
double value = (double) (std::clock() - start)/(%d * CLOCKS_PER_SEC);
myfile << value << std::endl;
myfile.close();
}
''' %(max(1, n_threads)))
try:
    # Remove a stale empty output directory from a previous run, if any.
    os.removedirs(path)
except Exception:
    pass
if standalone == 1:
    device.build(project_dir=path, compile_project=True, run_project=True, debug=False)
|
from typing import Tuple, cast
import reapy_boost
from reapy_boost import reascript_api as RPR
from reapy_boost.core import ReapyObject
class Source(ReapyObject):
    """Wrapper around a REAPER PCM media source.

    A Source is identified by its ReaScript pointer string ``id``; equality
    and hashing are both based on that id.
    """

    def __init__(self, id: str) -> None:
        self.id = id

    def __eq__(self, other: object) -> bool:
        return isinstance(other, Source) and self.id == other.id

    def __hash__(self) -> int:
        # BUGFIX: defining __eq__ alone implicitly sets __hash__ = None,
        # making Source instances unusable in sets/dict keys.  Hash on the
        # same key that __eq__ compares.
        return hash(self.id)

    @property
    def _args(self) -> Tuple[str]:
        # Constructor arguments used by ReapyObject serialization.
        return self.id,

    def delete(self) -> None:
        """
        Delete source. Be sure that no references to source remains.
        """
        RPR.PCM_Source_Destroy(self.id)  # type:ignore

    @property
    def filename(self) -> str:
        """
        Return source file name.

        Returns
        -------
        filename : str
            Source file name.
        """
        # ReaScript fills the provided buffer; 10**5 is the buffer size.
        _, filename, _ = RPR.GetMediaSourceFileName(  # type:ignore
            self.id, "", 10**5)
        return filename

    @property
    def has_valid_id(self) -> bool:
        """
        Whether ReaScript ID is still valid.

        For instance, if source has been deleted, ID will not be valid
        anymore.

        :type: bool
        """
        return self._has_valid_id_inside()

    @reapy_boost.inside_reaper()
    def _has_valid_id_inside(self) -> bool:
        # The id is valid if any open project recognizes the pointer.
        pointer, name = self._get_pointer_and_name()
        return any(
            RPR.ValidatePtr2(project.id, pointer, name)  # type:ignore
            for project in reapy_boost.get_projects()
        )

    def length(self, unit: str = "seconds") -> float:
        """
        Return source length in `unit`.

        Parameters
        ----------
        unit : {"beats", "seconds"}

        Returns
        -------
        length : float
            Source length in `unit`.

        Raises
        ------
        NotImplementedError
            When the requested unit does not match the source's native unit
            (quantized sources report beats, others seconds); conversion is
            not implemented.
        """
        length, _, is_quantized = RPR.GetMediaSourceLength(  # type:ignore
            self.id, 0)
        length = cast(float, length)
        if is_quantized:
            if unit == "beats":
                return length
            else:
                # elif unit == "seconds":
                raise NotImplementedError()
        else:
            if unit == "beats":
                raise NotImplementedError()
            else:
                # elif unit == "seconds":
                return length

    @property
    def n_channels(self) -> int:
        """
        Return number of channels in source media.

        Returns
        -------
        n_channels : int
            Number of channels in source media.
        """
        n_channels = RPR.GetMediaSourceNumChannels(self.id)  # type:ignore
        return n_channels

    @property
    def sample_rate(self) -> int:
        """
        Return source sample rate.

        Returns
        -------
        sample_rate : int
            Source sample rate.
        """
        sample_rate = RPR.GetMediaSourceSampleRate(self.id)  # type:ignore
        return sample_rate

    @property
    def type(self) -> str:
        """
        Return source type ("WAV", "MIDI", etc.).

        Returns
        -------
        type : str
            Source type.
        """
        _, type, _ = RPR.GetMediaSourceType(self.id, "", 10**5)  # type:ignore
        return type
|
from django.shortcuts import get_object_or_404
from mayan.apps.documents.models.document_models import Document
from mayan.apps.rest_api.api_view_mixins import ExternalObjectAPIViewMixin
from mayan.apps.rest_api import generics
from ..permissions import (
permission_workflow_instance_transition,
permission_workflow_template_view, permission_workflow_tools
)
from ..serializers import (
WorkflowInstanceLaunchSerializer, WorkflowInstanceSerializer,
WorkflowInstanceLogEntrySerializer, WorkflowTemplateTransitionSerializer
)
class APIWorkflowInstanceLaunchActionView(generics.ObjectActionAPIView):
    """
    post: Launch a new workflow instance for the specified document.
    """
    lookup_url_kwarg = 'document_id'
    # Launching a workflow is gated on the workflow tools permission.
    mayan_object_permissions = {
        'POST': (permission_workflow_tools,)
    }
    serializer_class = WorkflowInstanceLaunchSerializer
    # Restrict lookups to the Document.valid manager's queryset.
    queryset = Document.valid.all()

    def get_serializer_extra_context(self):
        # The serializer uses the document type to limit which workflow
        # templates may be selected.
        obj = self.get_object()
        return {
            'document': obj, 'document_type': obj.document_type
        }

    def object_action(self, request, serializer):
        # The validated 'workflow_template_id' field resolves to a workflow
        # template instance.
        workflow_template = serializer.validated_data['workflow_template_id']
        workflow_template.launch_for(document=self.object)
class APIWorkflowInstanceListView(
    ExternalObjectAPIViewMixin, generics.ListAPIView
):
    """
    get: Returns a list of all the document workflow instances.
    """
    external_object_queryset = Document.valid.all()
    external_object_pk_url_kwarg = 'document_id'
    # View permission is checked both on the document (external object) and
    # on the workflow instances themselves.
    mayan_external_object_permissions = {
        'GET': (permission_workflow_template_view,),
    }
    mayan_object_permissions = {
        'GET': (permission_workflow_template_view,),
    }
    serializer_class = WorkflowInstanceSerializer

    def get_queryset(self):
        # Workflow instances attached to the document resolved from the URL.
        return self.external_object.workflows.all()
class APIWorkflowInstanceDetailView(
    ExternalObjectAPIViewMixin, generics.RetrieveAPIView
):
    """
    get: Return the details of the selected document workflow instances.
    """
    external_object_queryset = Document.valid.all()
    external_object_pk_url_kwarg = 'document_id'
    lookup_url_kwarg = 'workflow_instance_id'
    # View permission is checked both on the document (external object) and
    # on the workflow instance itself.
    mayan_external_object_permissions = {
        'GET': (permission_workflow_template_view,),
    }
    mayan_object_permissions = {
        'GET': (permission_workflow_template_view,),
    }
    serializer_class = WorkflowInstanceSerializer

    def get_queryset(self):
        # Only instances attached to the resolved document are retrievable.
        return self.external_object.workflows.all()
class APIWorkflowInstanceLogEntryDetailView(
    ExternalObjectAPIViewMixin, generics.RetrieveAPIView
):
    """
    get: Return the details of the selected document instances log entry.
    """
    external_object_pk_url_kwarg = 'document_id'
    external_object_queryset = Document.valid.all()
    mayan_external_object_permissions = {
        'GET': (permission_workflow_template_view,),
    }
    serializer_class = WorkflowInstanceLogEntrySerializer
    lookup_url_kwarg = 'workflow_instance_log_entry_id'

    def get_queryset(self):
        # Log entries belonging to the workflow instance from the URL.
        return self.get_workflow_instance().log_entries.all()

    def get_workflow_instance(self):
        # 404 when the instance does not belong to the resolved document.
        workflow = get_object_or_404(
            klass=self.external_object.workflows,
            pk=self.kwargs['workflow_instance_id']
        )
        return workflow
class APIWorkflowInstanceLogEntryListView(
    ExternalObjectAPIViewMixin, generics.ListCreateAPIView
):
    """
    get: Returns a list of all the document workflow instances log entries.
    post: Transition a document workflow by creating a new document workflow instance log entry.
    """
    external_object_pk_url_kwarg = 'document_id'
    external_object_queryset = Document.valid.all()
    # Reading entries requires the view permission; creating one (i.e.
    # transitioning the workflow) requires the transition permission.
    mayan_external_object_permissions = {
        'GET': (permission_workflow_template_view,),
        'POST': (permission_workflow_instance_transition,),
    }
    mayan_object_permissions = {
        'GET': (permission_workflow_template_view,),
    }
    ordering_fields = (
        'comment', 'id', 'transition', 'transition__destination_state',
        'transition__origin_state'
    )
    serializer_class = WorkflowInstanceLogEntrySerializer

    def get_serializer_context(self):
        # self.kwargs is empty during schema generation; only resolve the
        # workflow instance for real requests.
        context = super().get_serializer_context()
        if self.kwargs:
            context.update(
                {
                    'workflow_instance': self.get_workflow_instance(),
                }
            )
        return context

    def get_queryset(self):
        return self.get_workflow_instance().log_entries.all()

    def get_workflow_instance(self):
        # 404 when the instance does not belong to the resolved document.
        workflow = get_object_or_404(
            klass=self.external_object.workflows,
            pk=self.kwargs['workflow_instance_id']
        )
        return workflow
class APIWorkflowInstanceLogEntryTransitionListView(
    ExternalObjectAPIViewMixin, generics.ListAPIView
):
    """
    get: Returns a list of all the possible transition choices for the workflow instance.
    """
    external_object_pk_url_kwarg = 'document_id'
    external_object_queryset = Document.valid.all()
    mayan_external_object_permissions = {
        'GET': (permission_workflow_template_view,),
    }
    mayan_object_permissions = {
        'GET': (permission_workflow_template_view,),
    }
    ordering_fields = ('destination_state', 'id', 'origin_state')
    serializer_class = WorkflowTemplateTransitionSerializer

    def get_serializer_context(self):
        # self.kwargs is empty during schema generation; only resolve the
        # workflow instance for real requests.
        context = super().get_serializer_context()
        if self.kwargs:
            context.update(
                {
                    'workflow_instance': self.get_workflow_instance(),
                }
            )
        return context

    def get_queryset(self):
        # Transition choices are filtered by what the requesting user may do.
        return self.get_workflow_instance().get_transition_choices(
            _user=self.request.user
        )

    def get_workflow_instance(self):
        # 404 when the instance does not belong to the resolved document.
        workflow = get_object_or_404(
            klass=self.external_object.workflows,
            pk=self.kwargs['workflow_instance_id']
        )
        return workflow
|
#!/usr/bin/python
###############################################################################
#
# remote.py
#
# <tbd>.
#
# Resources:
# <list>
#
# January 12, 2019
#
###############################################################################
import time
###############################################################################
#
# main()
#
def main():
    """Program entry point; not yet implemented (placeholder, '<tbd>')."""
###############################################################################
#
# send_cmd()
#
def send_cmd(cmd_string, time_out=1, ser=None):
    """
    Send a command string to the controller and block until it returns with
    prompt or times out.

    Args:
        cmd_string: command to send; a carriage return is appended.
        time_out: seconds to wait for the '>' prompt before giving up.
        ser: serial-port-like object exposing write() and read().
            NOTE(review): the original code referenced an undefined
            ``self.__ser`` here (this function was clearly lifted out of a
            class); the port is now an explicit argument.

    Returns:
        0 if prompt ok, -1 if time-out is reached before prompt.

    Raises:
        ValueError: if no serial port object is supplied.
    """
    if ser is None:
        raise ValueError('a serial port object must be supplied via ser=')
    print('cmd: {}'.format(cmd_string))
    # Append the CR terminator and send as raw bytes (the original extended a
    # bytearray with a str, which only worked on Python 2).
    ser.write(bytearray((cmd_string + '\r').encode('ascii')))
    # Poll for the '>' prompt until the deadline passes.
    # NOTE(review): read() is assumed to return str (as the original's
    # strip('\r\n\t ') implies); pyserial on Python 3 returns bytes — confirm.
    start = time.time()
    while True:
        pr = ser.read(3)
        if pr.strip('\r\n\t ') == '>':
            return 0
        if time.time() - start > time_out:
            return -1
###############################################################################
#
# Startup
#
if __name__ == '__main__':
main()
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
from CNN_Datasets.R_A.datasets.CWRU import CWRU
from CNN_Datasets.R_A.datasets.CWRUFFT import CWRUFFT
from CNN_Datasets.R_A.datasets.CWRUCWT import CWRUCWT
from CNN_Datasets.R_A.datasets.CWRUSTFT import CWRUSTFT
from CNN_Datasets.R_A.datasets.CWRUSlice import CWRUSlice
from CNN_Datasets.R_A.datasets.MFPT import MFPT
from CNN_Datasets.R_A.datasets.MFPTFFT import MFPTFFT
from CNN_Datasets.R_A.datasets.MFPTCWT import MFPTCWT
from CNN_Datasets.R_A.datasets.MFPTSTFT import MFPTSTFT
from CNN_Datasets.R_A.datasets.MFPTSlice import MFPTSlice
from CNN_Datasets.R_A.datasets.PU import PU
from CNN_Datasets.R_A.datasets.PUFFT import PUFFT
from CNN_Datasets.R_A.datasets.PUCWT import PUCWT
from CNN_Datasets.R_A.datasets.PUSTFT import PUSTFT
from CNN_Datasets.R_A.datasets.PUSlice import PUSlice
from CNN_Datasets.R_A.datasets.SEU import SEU
from CNN_Datasets.R_A.datasets.SEUFFT import SEUFFT
from CNN_Datasets.R_A.datasets.SEUCWT import SEUCWT
from CNN_Datasets.R_A.datasets.SEUSTFT import SEUSTFT
from CNN_Datasets.R_A.datasets.SEUSlice import SEUSlice
from CNN_Datasets.R_A.datasets.UoC import UoC
from CNN_Datasets.R_A.datasets.UoCFFT import UoCFFT
from CNN_Datasets.R_A.datasets.UoCCWT import UoCCWT
from CNN_Datasets.R_A.datasets.UoCSTFT import UoCSTFT
from CNN_Datasets.R_A.datasets.UoCSlice import UoCSlice
from CNN_Datasets.R_A.datasets.JNU import JNU
from CNN_Datasets.R_A.datasets.JNUFFT import JNUFFT
from CNN_Datasets.R_A.datasets.JNUCWT import JNUCWT
from CNN_Datasets.R_A.datasets.JNUSTFT import JNUSTFT
from CNN_Datasets.R_A.datasets.JNUSlice import JNUSlice
from CNN_Datasets.R_A.datasets.XJTU import XJTU
from CNN_Datasets.R_A.datasets.XJTUFFT import XJTUFFT
from CNN_Datasets.R_A.datasets.XJTUCWT import XJTUCWT
from CNN_Datasets.R_A.datasets.XJTUSTFT import XJTUSTFT
from CNN_Datasets.R_A.datasets.XJTUSlice import XJTUSlice
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020 Ross Scroggs All Rights Reserved.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""GAM messages
"""
# These values can be translated into other languages
# Entries containing {0}, {1}, ... are positional slots filled via str.format.
ACCESS_FORBIDDEN = 'Access Forbidden'
API_ACCESS_DENIED = 'API access Denied'
API_CHECK_CLIENT_AUTHORIZATION = 'Please make sure the Client ID: {0} is authorized for the appropriate API or scopes:\n{1}\n\nRun: gam oauth create\n'
API_CHECK_SVCACCT_AUTHORIZATION = 'Please make sure the Service Account Client name: {0} is authorized for the appropriate API or scopes:\n{1}\n\nRun: gam user {2} check serviceaccount\n'
DISABLE_TLS_MIN_MAX = 'Execute: gam select default config tls_max_version "" tls_min_version "" save\n'
DOES_NOT_EXIST = 'Does not exist'
DOES_NOT_EXIST_OR_HAS_INVALID_FORMAT = '{0}: {1}, Does not exist or has invalid format'
EXECUTE_GAM_OAUTH_CREATE = '\nPlease run\n\ngam oauth delete\ngam oauth create\n\n'
EXPECTED = 'Expected'
INSTRUCTIONS_OAUTH2SERVICE_JSON = 'Please run\n\ngam create|use project\ngam user <user> check serviceaccount\n\nto create and authorize a Service account.\n'
INSUFFICIENT_PERMISSIONS_TO_PERFORM_TASK = 'Insufficient permissions to perform this task'
INVALID = 'Invalid'
INVALID_JSON_INFORMATION = 'Google API reported Invalid JSON Information'
IS_EXPIRED_OR_REVOKED = '{0}: {1}, Is expired or has been revoked'
NOT_FOUND = 'Not Found'
NO_CLIENT_ACCESS_ALLOWED = 'No Client Access allowed'
NO_SCOPES_FOR_API = 'There are no scopes authorized for the {0}'
NO_SVCACCT_ACCESS_ALLOWED = 'No Service Account Access allowed'
SERVICE_NOT_APPLICABLE = 'Service not applicable/Does not exist'
SERVICE_NOT_APPLICABLE_THIS_ADDRESS = 'Service not applicable for this address: {0}'
STRING_LENGTH = 'string length'
UNKNOWN = 'Unknown'
UNKNOWN_API_OR_VERSION = 'Unknown Google API or version: ({0}), contact {1}'
USED = 'Used'
|
# -*- coding: utf-8 -*-
"""056 - Maior e Menor da Sequência
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1-RmYLOW85mV31Tpi2dQ285vIwO3wR3tL
"""
# Read five weights from the user, then report the largest and smallest.
pesos = [float(input('Digite o peso: ')) for _ in range(5)]
print('O maior peso é {}Kg'.format(max(pesos)))
print('O menor peso é {}Kg'.format(min(pesos)))
import sys
sys.path.append('..')
import json
import os
from datetime import datetime
from typing import Dict, List
import platform
import warnings
warnings.filterwarnings("ignore")
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Optimizer
import numpy as np
import fire
from tqdm import tqdm
from models.downsampler import Downsampler
from utils.common_utils import get_noise, np_to_torch
from utils.bayesian_utils import NLLLoss, uncert_regression_gal, add_noise_sgld
from train_utils import closure, track_training, get_imgs, save_run, get_net_and_optim, get_mc_preds, track_uncert_sgld
torch.backends.cudnn.enabled = True
# Disable cudnn autotuning for reproducible runs.
torch.backends.cudnn.benchmark = False
# Deep-image-prior network architecture hyperparameters.
num_input_channels = 1
num_channels_down = [16, 32, 64, 128, 128, 128]
num_channels_up = [16, 32, 64, 128, 128, 128]
num_channels_skip = 0
# num_scales = 5
upsample_mode = 'nearest'
filter_size_down = 5
filter_size_up = 3
filter_size_skip = 1
need1x1_up = False
# need_sigmoid = False
pad = 'reflection'
imsize = -1 # (320, 320)
# Monte-Carlo samples for uncertainty estimation.
mc_iter = 10
# No extra per-iteration input noise; EMA weight for the smoothed output.
reg_noise_std = 0.
exp_weight = 0.99
def inpainting(exp_name: str = None,
               img_name: str = 'skin_lesion',
               criterion: str = 'nll',
               num_iter: int = 50000,
               num_scales: int = 6,
               gpu: int = 0,
               seed: int = 42,
               net_specs: dict = None,
               optim_specs: dict = None,
               path_log_dir: str = None,
               save: bool = True,
               net: nn.Module = None,
               optimizer: Optimizer = None) -> Dict[str, List[float]]:
    """Bayesian deep-image-prior inpainting training loop.

    Params
    ------------------------------------
    exp_name: experiment name; derived from net_specs/img_name/timestamp if None
    img_name:
    criterion: nll or mse
    num_scales:
    gpu:
    seed:
    net_specs: dropout_type, dropout_p, prior_mu, prior_sigma, prior_pi, kl_type, beta_type, sgld, burnin_iter, mcmc_iter

    Returns
    ------------------------------------
    dict of per-iteration training metrics (when imported; see final lines).
    """
    # BUGFIX: net_specs previously defaulted to a shared mutable {}; use None
    # as the sentinel instead.
    if net_specs is None:
        net_specs = {}
    if seed is not None:
        torch.manual_seed(seed)
        np.random.seed(seed)
    device = 'cuda:' + str(gpu)
    # torch.set_num_threads(1)
    # Human-readable log-dir fragment built from the net configuration.
    log_dir = [f'{str(k)}_{str(v)[:4]}' for k, v in net_specs.items()]
    if exp_name is None:
        exp_name = "%s_%s_%s" % ('_'.join(log_dir), img_name,
                                 datetime.now().strftime("%m_%d_%Y_%H_%M_%S"))
    if path_log_dir is None:
        path_log_dir = '/media/fastdata/toelle/logs_midl_inp/%s' % exp_name
    else:
        path_log_dir = '%s/%s' % (path_log_dir, exp_name)
    if save:
        if not os.path.exists(path_log_dir):
            os.mkdir(path_log_dir)
        # Persist the run configuration next to the logs.
        with open(path_log_dir + '/net_info.json', 'w') as f:
            info = net_specs.copy()
            info["num_scales"] = num_scales
            info["criterion"] = criterion
            info["img_name"] = img_name
            info["imsize"] = imsize
            json.dump(info, f, indent=4)
    imgs = get_imgs(img_name, 'inpainting', imsize=imsize)
    net_input = get_noise(num_input_channels, 'noise',
                          (imgs['gt'].shape[1], imgs['gt'].shape[2])).to(device).detach()
    # One extra output channel carries the predicted uncertainty for NLL.
    num_output_channels = imgs['gt'].shape[0] + 1
    img_torch = np_to_torch(imgs['gt']).to(device)
    img_mask_torch = np_to_torch(imgs['mask']).to(device)
    if net is None and optimizer is None:
        net, optimizer = get_net_and_optim(
            num_input_channels, num_output_channels, num_channels_down,
            num_channels_up, num_channels_skip, num_scales, filter_size_down,
            filter_size_up, filter_size_skip, upsample_mode=upsample_mode,
            pad=pad, need1x1_up=need1x1_up, net_specs=net_specs,
            optim_specs=optim_specs)
    net = net.to(device)
    if criterion == 'nll':
        criterion = NLLLoss(reduction='mean').to(device)
    else:
        criterion = nn.MSELoss(reduction='mean').to(device)
    net_input_saved = net_input.detach().clone()
    noise = net_input.detach().clone()
    out_avg = None
    results = {}
    # Cheng-style SGLD keeps a trail of posterior sample images.
    sgld_imgs = [] if "sgld_cheng" in net_specs else None
    pbar = tqdm(range(1, num_iter + 1))
    for i in pbar:
        if reg_noise_std > 0:
            net_input = net_input_saved + (noise.normal_() * reg_noise_std)
        ELBO, out, _ = closure(net, optimizer, net_input, img_torch,
                               criterion, mask=img_mask_torch)
        # Exponential moving average of the network output.
        if out_avg is None:
            out_avg = out.detach()
        else:
            out_avg = out_avg * exp_weight + out.detach() * (1 - exp_weight)
        results = track_training(
            img_torch, img_torch * img_mask_torch,
            dict(to_corrupted=out, to_gt=out, to_gt_sm=out_avg), results)
        if "sgld_cheng" in net_specs:
            sgld_imgs = track_uncert_sgld(sgld_imgs=sgld_imgs, iter=i,
                                          img=out.detach(), **net_specs)
            add_noise_sgld(net, 2 * optim_specs["lr"])
        pbar.set_description(
            'I: %d | ELBO: %.2f | PSNR_noisy: %.2f | PSNR_gt: %.2f | PSNR_gt_sm: %.2f'
            % (i, ELBO.item(), results['psnr_corrupted'][-1],
               results['psnr_gt'][-1], results['psnr_gt_sm'][-1]))
    if save:
        save_run(results, net, optimizer, net_input_saved, out_avg, sgld_imgs,
                 path=path_log_dir)
    # Return metrics only when imported (e.g. from a notebook); under fire
    # the return value would just be printed to the console.
    if __name__ != "__main__":
        return results


if __name__ == "__main__":
    fire.Fire(inpainting)
|
from libs import engineLib as engine
##Done with the includes
class Zone(object):
    """A room or area the player can be in.

    Holds the zone's items, exits, structures and NPCs, with add/remove and
    search/examine helpers.

    BUGFIX: the original mutator methods used a ``for`` loop whose ``else``
    was attached to the inner ``if``; they fired on the first non-matching
    element (overwriting existing entries), mutated containers while
    iterating them, and silently did nothing when the container was empty.
    They are rewritten with plain membership tests; behavior for the
    intended cases (add new, merge existing, remove, guard duplicates) is
    now correct.
    """

    def __init__(self, name, references, description, contents, exits, bLocked, keyItem, blockedText, unlockText, bDestroyKey, keyDestroyText, bEvent, Trigger, Event, structures, npcs):
        self.name = name
        self.references = references
        self.description = description
        self.contents = contents            # item name -> quantity
        self.exits = exits                  # direction -> zone
        self.bLocked = bLocked
        self.keyItem = keyItem
        self.blockedText = blockedText
        self.unlockText = unlockText
        self.bDestroyKey = bDestroyKey
        self.keyDestroyText = keyDestroyText
        self.bEvent = bEvent
        self.Trigger = Trigger
        self.Event = Event
        self.structures = structures
        self.npcs = npcs

    def examineRoom(self):
        print("You are in a " + self.description)

    def searchRoom(self):
        # Lists items (with quantities), structures, exits and NPCs.
        print("You search the immediate area, and you find:")
        if(self.contents != {}):
            for i in self.contents:
                if(self.contents[i] == 1):
                    print(engine.stringToClass(i).name)
                else:
                    # Naive pluralization: quantity + name + "s".
                    print(str(self.contents[i])+ " " + i + "s")
        else:
            print("Nothing.")
        if(self.structures != []):
            print("In the %s you also see:" % (self.references[0]))
            for s in self.structures:
                print(engine.stringToClass(s).name)
        if(self.exits != {}):
            print("And exits to the")
            for x in self.exits:
                print(x)
        if(self.npcs != []):
            for c in self.npcs:
                print("%s is here." % (engine.stringToClass(c).name))

    def addItem(self, item, quantity):
        # Merge into an existing stack or create a new one.
        self.contents[item] = self.contents.get(item, 0) + quantity

    def removeItem(self, item, quantity):
        # Removing at least the stored quantity deletes the stack entirely.
        if item not in self.contents:
            return
        if self.contents[item] > quantity:
            self.contents[item] = self.contents[item] - quantity
        else:
            del self.contents[item]

    def addExit(self, direction, zone):
        if direction in self.exits:
            print("There is already an exit that way.")
        else:
            self.exits[direction] = zone

    def removeExit(self, direction):
        if direction in self.exits:
            del self.exits[direction]

    def addStructure(self, newStructure):
        # No duplicates.
        if newStructure not in self.structures:
            self.structures.append(newStructure)

    def removeStucture(self, Structure):
        # Name kept as-is (historical typo) to preserve the public interface.
        if Structure in self.structures:
            self.structures.remove(Structure)

    def addNPC(self, NPC):
        # No duplicates.
        if NPC not in self.npcs:
            self.npcs.append(NPC)

    def removeNPC(self, NPC):
        if NPC in self.npcs:
            self.npcs.remove(NPC)
from setuptools import setup
# Packaging metadata for the evohome-client distribution.
# `python setup.py sdist upload` (or pip/build tooling) reads these fields;
# any change here alters the published package, so edit with care.
setup(
    name = 'evohomeclient',
    version = '0.2.8',
    description = 'Python client for connecting to the Evohome webservice',
    url = 'https://github.com/watchforstock/evohome-client/',
    # download_url pins the GitHub tarball for this exact release tag.
    download_url = 'https://github.com/watchforstock/evohome-client/tarball/0.2.8',
    author = 'Andrew Stock',
    author_email = 'evohome@andrew-stock.com',
    license = 'Apache 2',
    classifiers = [
        'Development Status :: 3 - Alpha',
    ],
    keywords = ['evohome'],
    # Ships both the v1 and v2 API client packages.
    packages = ['evohomeclient', 'evohomeclient2'],
    install_requires = ['requests']
)
|
import numpy as np


def get_sorted_top_k(array, top_k=1, axis=-1, reverse=False):
    """Return the top-k values of a multidimensional array along an axis.

    Uses np.argpartition to select the k candidates in O(n), then sorts
    only those k entries, which is much cheaper than a full argsort for
    small k.

    Args:
        array: input ndarray.
        top_k: number of entries to return per slice.
        axis: axis along which to rank.
        reverse: if True, return the k LARGEST values in descending order;
            otherwise the k smallest in ascending order.

    Returns:
        (top_sorted_scores, top_sorted_indexes): the selected values and
        their positions in the original array, both shaped like `array`
        with the ranked axis reduced to length top_k.
    """
    if reverse:
        # Partition so the last k slots along `axis` hold the largest
        # values (in arbitrary order), then slice those slots out.
        length = array.shape[axis]
        partitioned = np.argpartition(array, kth=-top_k, axis=axis)
        candidate_idx = np.take(partitioned, range(length - top_k, length), axis)
    else:
        # Smallest k: the first k slots after partitioning hold the minima.
        candidate_idx = np.take(np.argpartition(array, kth=top_k, axis=axis), range(0, top_k), axis)
    candidate_vals = np.take_along_axis(array, candidate_idx, axis)
    # The k candidates are unordered; sort just those k entries.
    order = np.argsort(candidate_vals, axis=axis)
    if reverse:
        order = np.flip(order, axis=axis)
    top_sorted_scores = np.take_along_axis(candidate_vals, order, axis)
    top_sorted_indexes = np.take_along_axis(candidate_idx, order, axis)
    return top_sorted_scores, top_sorted_indexes
if __name__ == "__main__":
    import time
    from sklearn.metrics.pairwise import cosine_similarity

    # Benchmark: partial sort via argpartition vs. a full argsort on a
    # 10 x 1,000,000 similarity matrix.
    queries = np.random.rand(10, 128)
    corpus = np.random.rand(1000000, 128)
    sims = cosine_similarity(queries, corpus)

    start_time = time.time()
    sorted_index_1 = get_sorted_top_k(sims, top_k=3, axis=1, reverse=True)[1]
    print(time.time() - start_time)

    start_time = time.time()
    sorted_index_2 = np.flip(np.argsort(sims, axis=1)[:, -3:], axis=1)
    print(time.time() - start_time)

    # Both approaches must agree on the selected indices.
    print((sorted_index_1 == sorted_index_2).all())
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and Contributors
# See license.txt
import frappe, unittest
from frappe.contacts.doctype.address.address import get_address_display
class TestAddress(unittest.TestCase):
    """Tests for rendering an Address document through an Address Template."""

    def test_template_works(self):
        """Render display text for a saved Address via the default India template."""
        # Seed the default India template if the fixture is missing.
        if not frappe.db.exists('Address Template', 'India'):
            template = frappe.get_doc({
                "doctype": "Address Template",
                "country": 'India',
                "is_default": 1
            })
            template.insert()

        # Seed a test address if it does not exist yet.
        if not frappe.db.exists('Address', '_Test Address-Office'):
            address_doc = frappe.get_doc({
                "address_line1": "_Test Address Line 1",
                "address_title": "_Test Address",
                "address_type": "Office",
                "city": "_Test City",
                "state": "Test State",
                "country": "India",
                "doctype": "Address",
                "is_primary_address": 1,
                "phone": "+91 0000000000"
            })
            address_doc.insert()

        # Any stored address will do; render it and require non-empty output.
        first_address = frappe.get_list("Address")[0].name
        rendered = get_address_display(frappe.get_doc("Address", first_address).as_dict())
        self.assertTrue(rendered)
from fabric.api import run
from fabric.context_managers import cd
def deploy():
    """Run the project's deploy script inside the remote checkout.

    Requires a fabric host context; performs remote I/O only.
    """
    remote_checkout = "~/classwhole"
    deploy_script = "./deploy.sh"
    with cd(remote_checkout):
        run(deploy_script)
|
import requests, os, json, time
import pandas as pd
""" TODO:
add features to df: eg. javascripts, widgets, language, frameworks, analytics
"""
#domains_file = '../Datasets/features_extractions/base_(all).csv'
domains_file = './new_mal_urls.csv'
#path = os.path.dirname(os.path.abspath(__file__))
#df = pd.read_csv(path+'/'+domains_file)
df= pd.read_csv(domains_file)
domains = df['url']
df['builtwith']=""
print(domains)
counter=0
url=""
error_counter=0
for index, domain in enumerate(domains):
print(f'Run number {index} / {len(domains)}')
if counter%3 == 0:
url = f'https://api.builtwith.com/free1/api.json?KEY=b3051c45-25c1-4ac5-b13a-c840091d0841&LOOKUP={domain}'
elif counter%3 == 1:
url= f'https://api.builtwith.com/free1/api.json?KEY=1a30a8d5-3080-4792-8c0e-730c89436e83&LOOKUP={domain}'
elif counter%3 == 2:
url=f'https://api.builtwith.com/free1/api.json?KEY=c72cbb9a-039b-4e5d-8848-fa90af9b0168&LOOKUP={domain}'
res = requests.get(url)
ans = res.text
time.sleep(0.4)
if ans.find('Errors') != -1:
error_counter+=1
print(ans)
df['builtwith'][index]="-1"
print(f'Error number {error_counter}')
counter+=1
new_df=df[['url', 'builtwith']].copy()
new_df.to_csv('CheckedBuiltwith.csv')
df = pd.read_csv('CheckedBuiltwith.csv')
domains = df['url']
new_domains=[]
for index, domain in enumerate(domains):
if df['builtwith'][index]!= -1:
new_domains.append([domain,"1"])
new_df = pd.DataFrame(new_domains, columns=['0', '1'])
new_df.to_csv('NewMaliciousDomains.csv')
|
#!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2020 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
import filecmp
import logging
import os.path
import shutil
import unittest
from unittest import mock
import numpy as np
from rmgpy import settings
from rmgpy.data.kinetics.database import KineticsDatabase
from rmgpy.data.kinetics.family import TemplateReaction
from rmgpy.data.rmg import RMGDatabase
from rmgpy.data.thermo import ThermoDatabase
from rmgpy.molecule import Molecule
from rmgpy.species import Species
###################################################
class TestFamily(unittest.TestCase):
    """Tests for KineticsFamily tree queries, recipe application and saving.

    FIX: all uses of the deprecated ``assertEquals`` alias (removed in
    Python 3.12) are replaced with ``assertEqual``.
    """

    @classmethod
    def setUpClass(cls):
        """
        A function run ONCE before all unit tests in this class.
        """
        # Set up a dummy database
        cls.database = KineticsDatabase()
        cls.database.load_families(
            path=os.path.join(settings['test_data.directory'], 'testing_database/kinetics/families'),
            families=[
                'intra_H_migration',
                'R_Addition_MultipleBond',
                'H_Abstraction',
                'Intra_ene_reaction',
                '6_membered_central_C-C_shift',
                '1,2_shiftC',
                'Intra_R_Add_Exo_scission',
                'intra_substitutionS_isomerization',
                'R_Addition_COm',
                'R_Recombination'
            ],
        )
        cls.family = cls.database.families['intra_H_migration']

    def test_get_backbone_roots(self):
        """
        Test the get_backbone_roots() function
        """
        backbones = self.family.get_backbone_roots()
        self.assertEqual(backbones[0].label, "RnH")

    def test_get_end_roots(self):
        """
        Test the get_end_roots() function
        """
        ends = self.family.get_end_roots()
        self.assertEqual(len(ends), 2)
        self.assertIn(self.family.groups.entries["Y_rad_out"], ends)
        self.assertIn(self.family.groups.entries["XH_out"], ends)

    def test_get_top_level_groups(self):
        """
        Test the get_top_level_groups() function
        """
        top_groups = self.family.get_top_level_groups(self.family.groups.entries["RnH"])
        self.assertEqual(len(top_groups), 4)
        self.assertIn(self.family.groups.entries["R5Hall"], top_groups)
        self.assertIn(self.family.groups.entries["R6Hall"], top_groups)
        self.assertIn(self.family.groups.entries["R2Hall"], top_groups)
        self.assertIn(self.family.groups.entries["R3Hall"], top_groups)

    def test_react_benzene_bond(self):
        """
        Test that hydrogen addition to benzene (w/ benzene bonds) returns kekulized product.
        """
        family = self.database.families['R_Addition_MultipleBond']
        reactants = [Molecule().from_adjacency_list("""
1 *1 C u0 p0 c0 {2,B} {6,B} {7,S}
2 *2 C u0 p0 c0 {1,B} {3,B} {8,S}
3 C u0 p0 c0 {2,B} {4,B} {9,S}
4 C u0 p0 c0 {3,B} {5,B} {10,S}
5 C u0 p0 c0 {4,B} {6,B} {11,S}
6 C u0 p0 c0 {1,B} {5,B} {12,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {3,S}
10 H u0 p0 c0 {4,S}
11 H u0 p0 c0 {5,S}
12 H u0 p0 c0 {6,S}
"""),
                     Molecule().from_adjacency_list("1 *3 H u1 p0 c0")]
        expected_product = Molecule().from_adjacency_list("""
multiplicity 2
1 C u0 p0 c0 {2,S} {6,S} {7,S} {13,S}
2 C u1 p0 c0 {1,S} {3,S} {8,S}
3 C u0 p0 c0 {2,S} {4,D} {9,S}
4 C u0 p0 c0 {3,D} {5,S} {10,S}
5 C u0 p0 c0 {4,S} {6,D} {11,S}
6 C u0 p0 c0 {1,S} {5,D} {12,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {3,S}
10 H u0 p0 c0 {4,S}
11 H u0 p0 c0 {5,S}
12 H u0 p0 c0 {6,S}
13 H u0 p0 c0 {1,S}
""")
        products = family.apply_recipe(reactants)
        self.assertEqual(len(products), 1)
        self.assertTrue(expected_product.is_isomorphic(products[0]))

    def test_react_benzene_bond2(self):
        """
        Test that hydrogen addition to phenanthrene (w/ benzene bonds) returns kekulized product.
        """
        family = self.database.families['R_Addition_MultipleBond']
        reactants = [Molecule().from_adjacency_list("""
1 *1 C u0 p0 c0 {2,B} {3,B} {6,B}
2 *2 C u0 p0 c0 {1,B} {4,B} {9,B}
3 C u0 p0 c0 {1,B} {5,B} {7,B}
4 C u0 p0 c0 {2,B} {8,B} {10,B}
5 C u0 p0 c0 {3,B} {11,B} {17,S}
6 C u0 p0 c0 {1,B} {12,B} {18,S}
7 C u0 p0 c0 {3,B} {8,B} {19,S}
8 C u0 p0 c0 {4,B} {7,B} {20,S}
9 C u0 p0 c0 {2,B} {13,B} {21,S}
10 C u0 p0 c0 {4,B} {14,B} {23,S}
11 C u0 p0 c0 {5,B} {12,B} {15,S}
12 C u0 p0 c0 {6,B} {11,B} {16,S}
13 C u0 p0 c0 {9,B} {14,B} {22,S}
14 C u0 p0 c0 {10,B} {13,B} {24,S}
15 H u0 p0 c0 {11,S}
16 H u0 p0 c0 {12,S}
17 H u0 p0 c0 {5,S}
18 H u0 p0 c0 {6,S}
19 H u0 p0 c0 {7,S}
20 H u0 p0 c0 {8,S}
21 H u0 p0 c0 {9,S}
22 H u0 p0 c0 {13,S}
23 H u0 p0 c0 {10,S}
24 H u0 p0 c0 {14,S}
"""),
                     Molecule().from_adjacency_list("1 *3 H u1 p0 c0")]
        expected_product = Molecule().from_adjacency_list("""
multiplicity 2
1 *1 C u0 p0 c0 {2,S} {3,S} {5,S} {15,S}
2 *2 C u1 p0 c0 {1,S} {4,S} {8,S}
3 C u0 p0 c0 {1,S} {6,S} {7,D}
4 C u0 p0 c0 {2,S} {9,D} {10,S}
5 C u0 p0 c0 {1,S} {11,D} {16,S}
6 C u0 p0 c0 {3,S} {12,D} {19,S}
7 C u0 p0 c0 {3,D} {9,S} {20,S}
8 C u0 p0 c0 {2,S} {13,D} {22,S}
9 C u0 p0 c0 {4,D} {7,S} {21,S}
10 C u0 p0 c0 {4,S} {14,D} {24,S}
11 C u0 p0 c0 {5,D} {12,S} {18,S}
12 C u0 p0 c0 {6,D} {11,S} {17,S}
13 C u0 p0 c0 {8,D} {14,S} {23,S}
14 C u0 p0 c0 {10,D} {13,S} {25,S}
15 *3 H u0 p0 c0 {1,S}
16 H u0 p0 c0 {5,S}
17 H u0 p0 c0 {12,S}
18 H u0 p0 c0 {11,S}
19 H u0 p0 c0 {6,S}
20 H u0 p0 c0 {7,S}
21 H u0 p0 c0 {9,S}
22 H u0 p0 c0 {8,S}
23 H u0 p0 c0 {13,S}
24 H u0 p0 c0 {10,S}
25 H u0 p0 c0 {14,S}
""")
        products = family.apply_recipe(reactants)
        self.assertEqual(len(products), 1)
        self.assertTrue(expected_product.is_isomorphic(products[0]))

    def test_intra__h_migration(self):
        """
        Test that the intra_H_migration family returns a properly re-labeled product structure.
        This family is its own reverse.
        """
        family = self.database.families['intra_H_migration']
        reactants = [Molecule().from_adjacency_list("""
multiplicity 2
1 *2 C u0 p0 c0 {3,S} {11,S} {12,S} {13,S}
2 *4 C u0 p0 c0 {4,S} {5,S} {6,D}
3 *5 C u0 p0 c0 {1,S} {7,D} {14,S}
4 *1 C u1 p0 c0 {2,S} {8,S} {15,S}
5 C u0 p0 c0 {2,S} {10,D} {17,S}
6 *6 C u0 p0 c0 {2,D} {7,S} {19,S}
7 *7 C u0 p0 c0 {3,D} {6,S} {21,S}
8 C u0 p0 c0 {4,S} {9,D} {16,S}
9 C u0 p0 c0 {8,D} {10,S} {20,S}
10 C u0 p0 c0 {5,D} {9,S} {18,S}
11 *3 H u0 p0 c0 {1,S}
12 H u0 p0 c0 {1,S}
13 H u0 p0 c0 {1,S}
14 H u0 p0 c0 {3,S}
15 H u0 p0 c0 {4,S}
16 H u0 p0 c0 {8,S}
17 H u0 p0 c0 {5,S}
18 H u0 p0 c0 {10,S}
19 H u0 p0 c0 {6,S}
20 H u0 p0 c0 {9,S}
21 H u0 p0 c0 {7,S}
""")]
        expected_product = Molecule().from_adjacency_list("""
multiplicity 2
1 *1 C u1 p0 c0 {3,S} {12,S} {13,S}
2 *5 C u0 p0 c0 {4,S} {5,S} {6,D}
3 *4 C u0 p0 c0 {1,S} {7,D} {14,S}
4 *2 C u0 p0 c0 {2,S} {11,S} {8,S} {15,S}
5 C u0 p0 c0 {2,S} {10,D} {17,S}
6 *7 C u0 p0 c0 {2,D} {7,S} {19,S}
7 *6 C u0 p0 c0 {3,D} {6,S} {21,S}
8 C u0 p0 c0 {4,S} {9,D} {16,S}
9 C u0 p0 c0 {8,D} {10,S} {20,S}
10 C u0 p0 c0 {5,D} {9,S} {18,S}
11 *3 H u0 p0 c0 {4,S}
12 H u0 p0 c0 {1,S}
13 H u0 p0 c0 {1,S}
14 H u0 p0 c0 {3,S}
15 H u0 p0 c0 {4,S}
16 H u0 p0 c0 {8,S}
17 H u0 p0 c0 {5,S}
18 H u0 p0 c0 {10,S}
19 H u0 p0 c0 {6,S}
20 H u0 p0 c0 {9,S}
21 H u0 p0 c0 {7,S}
""")
        products = family.apply_recipe(reactants)
        self.assertEqual(len(products), 1)
        mapping = {}
        for label, atom in expected_product.get_all_labeled_atoms().items():
            mapping[atom] = products[0].get_labeled_atoms(label)[0]
        self.assertTrue(expected_product.is_isomorphic(products[0], mapping))

    def test_h_abstraction(self):
        """
        Test that the H_Abstraction family returns a properly re-labeled product structure.
        This family is its own reverse.
        """
        family = self.database.families['H_Abstraction']
        reactants = [Molecule().from_adjacency_list("""
1 *1 C u0 p0 c0 {2,S} {4,S} {5,S} {6,S}
2 C u0 p0 c0 {1,S} {3,D} {7,S}
3 C u0 p0 c0 {2,D} {8,S} {9,S}
4 *2 H u0 p0 c0 {1,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {2,S}
8 H u0 p0 c0 {3,S}
9 H u0 p0 c0 {3,S}
"""),
                     Molecule().from_adjacency_list("1 *3 H u1 p0 c0")]
        expected_products = [Molecule().from_adjacency_list("""
1 *1 H u0 p0 c0 {2,S}
2 *2 H u0 p0 c0 {1,S}
"""),
                             Molecule().from_adjacency_list("""
1 *3 C u1 p0 c0 {2,S} {5,S} {6,S}
2 C u0 p0 c0 {1,S} {3,D} {7,S}
3 C u0 p0 c0 {2,D} {8,S} {9,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {2,S}
8 H u0 p0 c0 {3,S}
9 H u0 p0 c0 {3,S}
""")]
        products = family.apply_recipe(reactants)
        self.assertEqual(len(products), 2)
        mapping1 = {}
        for label, atom in expected_products[0].get_all_labeled_atoms().items():
            mapping1[atom] = products[0].get_labeled_atoms(label)[0]
        self.assertTrue(expected_products[0].is_isomorphic(products[0], mapping1))
        mapping2 = {}
        for label, atom in expected_products[1].get_all_labeled_atoms().items():
            mapping2[atom] = products[1].get_labeled_atoms(label)[0]
        self.assertTrue(expected_products[1].is_isomorphic(products[1], mapping2))

    def test_intra_ene_reaction(self):
        """
        Test that the Intra_ene_reaction family returns a properly re-labeled product structure.
        This family is its own reverse.
        """
        family = self.database.families['Intra_ene_reaction']
        reactants = [Molecule().from_adjacency_list("""
1 *1 C u0 p0 c0 {2,S} {3,S} {4,S} {10,S}
2 *5 C u0 p0 c0 {1,S} {5,D} {6,S}
3 *2 C u0 p0 c0 {1,S} {7,D} {11,S}
4 C u0 p0 c0 {1,S} {8,D} {12,S}
5 *4 C u0 p0 c0 {2,D} {7,S} {13,S}
6 C u0 p0 c0 {2,S} {9,D} {15,S}
7 *3 C u0 p0 c0 {3,D} {5,S} {14,S}
8 C u0 p0 c0 {4,D} {9,S} {17,S}
9 C u0 p0 c0 {6,D} {8,S} {16,S}
10 *6 H u0 p0 c0 {1,S}
11 H u0 p0 c0 {3,S}
12 H u0 p0 c0 {4,S}
13 H u0 p0 c0 {5,S}
14 H u0 p0 c0 {7,S}
15 H u0 p0 c0 {6,S}
16 H u0 p0 c0 {9,S}
17 H u0 p0 c0 {8,S}
""")]
        expected_product = Molecule().from_adjacency_list("""
1 *2 C u0 p0 c0 {2,D} {3,S} {4,S}
2 *3 C u0 p0 c0 {1,D} {5,S} {6,S}
3 *1 C u0 p0 c0 {1,S} {7,S} {11,S} {10,S}
4 C u0 p0 c0 {1,S} {8,D} {12,S}
5 *4 C u0 p0 c0 {2,S} {7,D} {13,S}
6 C u0 p0 c0 {2,S} {9,D} {15,S}
7 *5 C u0 p0 c0 {3,S} {5,D} {14,S}
8 C u0 p0 c0 {4,D} {9,S} {17,S}
9 C u0 p0 c0 {6,D} {8,S} {16,S}
10 *6 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {3,S}
12 H u0 p0 c0 {4,S}
13 H u0 p0 c0 {5,S}
14 H u0 p0 c0 {7,S}
15 H u0 p0 c0 {6,S}
16 H u0 p0 c0 {9,S}
17 H u0 p0 c0 {8,S}
""")
        products = family.apply_recipe(reactants)
        self.assertEqual(len(products), 1)
        mapping = {}
        for label, atom in expected_product.get_all_labeled_atoms().items():
            mapping[atom] = products[0].get_labeled_atoms(label)[0]
        self.assertTrue(expected_product.is_isomorphic(products[0], mapping))

    def test_6_membered_central_cc_shift(self):
        """
        Test that the 6_membered_central_C-C_shift family returns a properly re-labeled product structure.
        This family is its own reverse.
        """
        family = self.database.families['6_membered_central_C-C_shift']
        reactants = [Molecule().from_adjacency_list("""
1 *3 C u0 p0 c0 {2,S} {3,S} {7,S} {8,S}
2 *4 C u0 p0 c0 {1,S} {4,S} {9,S} {10,S}
3 *2 C u0 p0 c0 {1,S} {5,T}
4 *5 C u0 p0 c0 {2,S} {6,T}
5 *1 C u0 p0 c0 {3,T} {11,S}
6 *6 C u0 p0 c0 {4,T} {12,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {1,S}
9 H u0 p0 c0 {2,S}
10 H u0 p0 c0 {2,S}
11 H u0 p0 c0 {5,S}
12 H u0 p0 c0 {6,S}
""")]
        expected_product = Molecule().from_adjacency_list("""
1 *3 C u0 p0 c0 {2,S} {5,D} {7,S}
2 *4 C u0 p0 c0 {1,S} {6,D} {8,S}
3 *1 C u0 p0 c0 {5,D} {9,S} {10,S}
4 *6 C u0 p0 c0 {6,D} {11,S} {12,S}
5 *2 C u0 p0 c0 {1,D} {3,D}
6 *5 C u0 p0 c0 {2,D} {4,D}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {3,S}
10 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {4,S}
12 H u0 p0 c0 {4,S}
""")
        products = family.apply_recipe(reactants)
        self.assertEqual(len(products), 1)
        mapping = {}
        for label, atom in expected_product.get_all_labeled_atoms().items():
            mapping[atom] = products[0].get_labeled_atoms(label)[0]
        self.assertTrue(expected_product.is_isomorphic(products[0], mapping))

    def test_12_shift_c(self):
        """
        Test that the 1,2_shiftC family returns a properly re-labeled product structure.
        This family is its own reverse.
        """
        family = self.database.families['1,2_shiftC']
        reactants = [Molecule().from_adjacency_list("""
multiplicity 2
1 *2 C u0 p0 c0 {2,S} {3,S} {8,S} {9,S}
2 *1 C u0 p0 c0 {1,S} {10,S} {11,S} {12,S}
3 *3 C u1 p0 c0 {1,S} {4,S} {5,S}
4 C u0 p0 c0 {3,S} {6,D} {13,S}
5 C u0 p0 c0 {3,S} {7,D} {14,S}
6 C u0 p0 c0 {4,D} {7,S} {15,S}
7 C u0 p0 c0 {5,D} {6,S} {16,S}
8 H u0 p0 c0 {1,S}
9 H u0 p0 c0 {1,S}
10 H u0 p0 c0 {2,S}
11 H u0 p0 c0 {2,S}
12 H u0 p0 c0 {2,S}
13 H u0 p0 c0 {4,S}
14 H u0 p0 c0 {5,S}
15 H u0 p0 c0 {6,S}
16 H u0 p0 c0 {7,S}
""")]
        expected_product = Molecule().from_adjacency_list("""
multiplicity 2
1 *2 C u0 p0 c0 {2,S} {3,S} {4,S} {7,S}
2 *1 C u0 p0 c0 {1,S} {8,S} {9,S} {10,S}
3 C u0 p0 c0 {1,S} {5,D} {11,S}
4 C u0 p0 c0 {1,S} {6,D} {12,S}
5 C u0 p0 c0 {3,D} {6,S} {13,S}
6 C u0 p0 c0 {4,D} {5,S} {14,S}
7 *3 C u1 p0 c0 {1,S} {15,S} {16,S}
8 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {2,S}
10 H u0 p0 c0 {2,S}
11 H u0 p0 c0 {3,S}
12 H u0 p0 c0 {4,S}
13 H u0 p0 c0 {5,S}
14 H u0 p0 c0 {6,S}
15 H u0 p0 c0 {7,S}
16 H u0 p0 c0 {7,S}
""")
        products = family.apply_recipe(reactants)
        self.assertEqual(len(products), 1)
        mapping = {}
        for label, atom in expected_product.get_all_labeled_atoms().items():
            mapping[atom] = products[0].get_labeled_atoms(label)[0]
        self.assertTrue(expected_product.is_isomorphic(products[0], mapping))

    def test_intra_r_add_exo_scission(self):
        """
        Test that the Intra_R_Add_Exo_scission family returns a properly re-labeled product structure.
        This family is its own reverse.
        """
        family = self.database.families['Intra_R_Add_Exo_scission']
        reactants = [Molecule().from_adjacency_list("""
multiplicity 2
1 *3 C u0 p0 c0 {2,S} {8,S} {11,S} {12,S}
2 *2 C u0 p0 c0 {1,S} {3,B} {4,B}
3 C u0 p0 c0 {2,B} {5,B} {13,S}
4 C u0 p0 c0 {2,B} {7,B} {17,S}
5 C u0 p0 c0 {3,B} {6,B} {14,S}
6 C u0 p0 c0 {5,B} {7,B} {15,S}
7 C u0 p0 c0 {4,B} {6,B} {16,S}
8 *1 C u1 p0 c0 {1,S} {9,S} {18,S}
9 C u0 p0 c0 {8,S} {10,T}
10 C u0 p0 c0 {9,T} {19,S}
11 H u0 p0 c0 {1,S}
12 H u0 p0 c0 {1,S}
13 H u0 p0 c0 {3,S}
14 H u0 p0 c0 {5,S}
15 H u0 p0 c0 {6,S}
16 H u0 p0 c0 {7,S}
17 H u0 p0 c0 {4,S}
18 H u0 p0 c0 {8,S}
19 H u0 p0 c0 {10,S}
""")]
        expected_product = Molecule().from_adjacency_list("""
multiplicity 2
1 *3 C u0 p0 c0 {2,S} {8,S} {9,S} {11,S}
2 *2 C u0 p0 c0 {1,S} {3,B} {4,B}
3 C u0 p0 c0 {2,B} {5,B} {12,S}
4 C u0 p0 c0 {2,B} {7,B} {16,S}
5 C u0 p0 c0 {3,B} {6,B} {13,S}
6 C u0 p0 c0 {5,B} {7,B} {14,S}
7 C u0 p0 c0 {4,B} {6,B} {15,S}
8 *1 C u1 p0 c0 {1,S} {17,S} {18,S}
9 C u0 p0 c0 {1,S} {10,T}
10 C u0 p0 c0 {9,T} {19,S}
11 H u0 p0 c0 {1,S}
12 H u0 p0 c0 {3,S}
13 H u0 p0 c0 {5,S}
14 H u0 p0 c0 {6,S}
15 H u0 p0 c0 {7,S}
16 H u0 p0 c0 {4,S}
17 H u0 p0 c0 {8,S}
18 H u0 p0 c0 {8,S}
19 H u0 p0 c0 {10,S}
""")
        products = family.apply_recipe(reactants)
        self.assertEqual(len(products), 1)
        mapping = {}
        for label, atom in expected_product.get_all_labeled_atoms().items():
            mapping[atom] = products[0].get_labeled_atoms(label)[0]
        self.assertTrue(expected_product.is_isomorphic(products[0], mapping))

    def test_intra_substitution_s_isomerization(self):
        """
        Test that the intra_substitutionS_isomerization family returns a properly re-labeled product structure.
        This family is its own reverse.
        """
        family = self.database.families['intra_substitutionS_isomerization']
        reactants = [Molecule().from_adjacency_list("""
multiplicity 2
1 *2 C u0 p0 c0 {3,S} {4,S} {5,S} {6,S}
2 C u0 p0 c0 {3,S} {7,S} {8,S} {9,S}
3 *3 C u1 p0 c0 {1,S} {2,S} {10,S}
4 *1 S u0 p2 c0 {1,S} {11,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {2,S}
8 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {2,S}
10 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {4,S}
""")]
        expected_product = Molecule().from_adjacency_list("""
multiplicity 2
1 *2 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}
2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}
3 *3 C u1 p0 c0 {1,S} {9,S} {10,S}
4 *1 S u0 p2 c0 {1,S} {11,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {2,S}
7 H u0 p0 c0 {2,S}
8 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {3,S}
10 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {4,S}
""")
        products = family.apply_recipe(reactants)
        self.assertEqual(len(products), 1)
        mapping = {}
        for label, atom in expected_product.get_all_labeled_atoms().items():
            mapping[atom] = products[0].get_labeled_atoms(label)[0]
        self.assertTrue(expected_product.is_isomorphic(products[0], mapping))

    def test_r_addition_com(self):
        """
        Test that the R_Addition_COm family can successfully match the reaction and returns properly product structures.
        This family's product template is generated by charged groups.
        """
        family = self.database.families['R_Addition_COm']
        reactants = [Molecule().from_adjacency_list("""
1 *1 C u0 p1 c-1 {2,T}
2 *3 O u0 p1 c+1 {1,T}
"""),
                     Molecule().from_adjacency_list("""
multiplicity 2
1 C u0 p0 c0 {2,D} {7,S} {8,S}
2 C u0 p0 c0 {1,D} {3,S} {9,S}
3 C u0 p0 c0 {2,S} {4,S} {10,S} {11,S}
4 *2 C u1 p0 c0 {3,S} {5,S} {6,S}
5 H u0 p0 c0 {4,S}
6 H u0 p0 c0 {4,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {1,S}
9 H u0 p0 c0 {2,S}
10 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {3,S}
"""),
                     ]
        expected_products = [Molecule().from_adjacency_list("""
multiplicity 2
1 C u0 p0 c0 {2,D} {7,S} {8,S}
2 C u0 p0 c0 {1,D} {3,S} {9,S}
3 C u0 p0 c0 {2,S} {4,S} {10,S} {11,S}
4 *2 C u0 p0 c0 {3,S} {5,S} {12,S} {13,S}
5 *1 C u1 p0 c0 {4,S} {6,D}
6 *3 O u0 p2 c0 {5,D}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {1,S}
9 H u0 p0 c0 {2,S}
10 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {3,S}
12 H u0 p0 c0 {4,S}
13 H u0 p0 c0 {4,S}
"""),
                             ]
        products = family.apply_recipe(reactants)
        self.assertEqual(len(products), 1)
        self.assertTrue(expected_products[0].is_isomorphic(products[0]))

    def test_save_family(self):
        """
        This tests the the family.save method by writing a new temporary file and
        comparing it to the original source.
        """
        base_path = os.path.join(settings['test_data.directory'], 'testing_database', 'kinetics', 'families')
        try:
            os.makedirs(os.path.join(base_path, 'intra_H_copy'))
            self.family.save(os.path.join(base_path, 'intra_H_copy'))
            self.assertTrue(filecmp.cmp(os.path.join(base_path, 'intra_H_migration', 'groups.py'),
                                        os.path.join(base_path, 'intra_H_copy', 'groups.py')))
            self.assertTrue(filecmp.cmp(os.path.join(base_path, 'intra_H_migration', 'rules.py'),
                                        os.path.join(base_path, 'intra_H_copy', 'rules.py')))
            self.assertTrue(filecmp.cmp(os.path.join(base_path, 'intra_H_migration', 'training', 'reactions.py'),
                                        os.path.join(base_path, 'intra_H_copy', 'training', 'reactions.py')))
            self.assertTrue(filecmp.cmp(os.path.join(base_path, 'intra_H_migration', 'training', 'dictionary.txt'),
                                        os.path.join(base_path, 'intra_H_copy', 'training', 'dictionary.txt')))
        finally:
            # Always remove the temporary copy, even if a comparison failed.
            shutil.rmtree(os.path.join(base_path, 'intra_H_copy'))

    def test_reactant_num_id(self):
        """
        Tests that templates aren't applied to the incorrect
        number of reactants
        """
        family = self.database.families['R_Recombination']
        spc = Molecule().from_smiles("[CH2]CC[CH2]")
        out = family._generate_reactions(reactants=[spc], forward=True)
        self.assertEqual(out, [])
class TestTreeGeneration(unittest.TestCase):
    """Tests for automatic kinetics-tree generation and regularization.

    FIX: all uses of the deprecated ``assertEquals`` alias (removed in
    Python 3.12) are replaced with ``assertEqual``.
    """

    @classmethod
    def setUpClass(cls):
        """A function run ONCE before all unit tests in this class."""
        # Set up a dummy database
        cls.database = RMGDatabase()
        cls.database.load(
            path=os.path.join(settings['test_data.directory'], 'testing_database'),
            thermo_libraries=[],
            reaction_libraries=[],
            kinetics_families=[],
            depository=False,
            solvation=False,
            testing=True,
        )
        cls.database.load_forbidden_structures()
        cls.thermoDatabase = ThermoDatabase()  # the real full Thermo Database
        cls.thermoDatabase.load(path=os.path.join(settings['database.directory'], 'thermo'),
                                libraries=['primaryThermoLibrary'])
        cls.kineticsDatabase = KineticsDatabase()
        cls.kineticsDatabase.load_families(
            path=os.path.join(settings['test_data.directory'], 'testing_database/kinetics/families'),
            families=[
                'Singlet_Carbene_Intra_Disproportionation',
            ],
        )
        cls.family = cls.kineticsDatabase.families['Singlet_Carbene_Intra_Disproportionation']
        cls.treerxns = cls.family.get_training_set(thermo_database=cls.thermoDatabase, remove_degeneracy=True,
                                                   estimate_thermo=True, fix_labels=True, get_reverse=True)

    @classmethod
    def tearDownClass(cls):
        """A function run ONCE after all unit tests in this class."""
        import rmgpy.data.rmg
        rmgpy.data.rmg.database = None

    def test_a_clear_tree(self):
        """
        Test that the tree was properly cleared before generation
        """
        self.family.clean_tree()
        ents = [ent for ent in self.family.groups.entries.values() if ent.index != -1]
        self.assertEqual(len(ents), 1,
                         'more than one relevant group left in groups after preparing tree for generation')
        self.assertEqual(len(self.family.rules.entries), 1,
                         'more than one group in rules.entries after preparing tree for generation')
        root = self.family.groups.entries[list(self.family.rules.entries.keys())[0]]
        self.assertEqual([root], self.family.forward_template.reactants)
        self.assertEqual([root], self.family.groups.top)

    def test_b_generate_tree(self):
        """
        test tree generation process
        """
        def objective(k1s, k2s):
            # Weighted spread of the two rate-coefficient clusters.
            return len(k1s) * np.std(k1s) + len(k2s) * np.std(k2s)

        self.family.generate_tree(thermo_database=self.thermoDatabase, rxns=self.treerxns,
                                  obj=objective)  # test input objective function
        self.family.clean_tree()  # reclear
        self.family.generate_tree(thermo_database=self.thermoDatabase,
                                  rxns=self.treerxns)  # test that default objective works

    def test_c_parent_child(self):
        """
        test that the tree is structured properly
        """
        for entry in self.family.groups.entries.values():
            for entry2 in entry.children:
                self.assertIn(entry2, list(self.family.groups.entries.values()))
            if entry.parent:
                self.assertIn(entry.parent, list(self.family.groups.entries.values()))
        self.assertIsNone(self.family.groups.entries['Root'].parent)

    def test_f_rules(self):
        """
        test that there are six rules and each is under a different group
        """
        template_rxn_map = self.family.get_reaction_matches(thermo_database=self.thermoDatabase, remove_degeneracy=True)
        self.family.make_bm_rules_from_template_rxn_map(template_rxn_map)
        c = 0
        for rs in self.family.rules.entries.values():
            self.assertLess(len(rs), 2, 'more than one training reaction at a node')
            if len(rs) == 1:
                c += 1
        self.assertEqual(c, 6, 'incorrect number of kinetics information, expected 6 found {0}'.format(c))

    def test_d_regularization_dims(self):
        """
        test that appropriate regularization dimensions have been identified
        """
        template_rxn_map = self.family.get_reaction_matches(thermo_database=self.database.thermo, estimate_thermo=False)
        for entry in self.family.groups.entries.values():
            if entry.children == []:
                continue
            # set of violations, one atom or one bond is allowed to be in violation (if it was just created)
            vio_obj = set()
            pgrp = entry.item
            exts = pgrp.get_extensions()
            for grp, grpc, name, typ, indc in exts:
                if typ == 'intNewBondExt' or typ == 'extNewBondExt':
                    continue
                else:
                    val, boo = self.family.eval_ext(entry, grp, name, template_rxn_map)
                    if val != np.inf:
                        continue
                    atms = grp.atoms
                    if typ == 'bondExt':
                        bd = grp.get_bond(atms[indc[0]], atms[indc[1]])
                        bds = bd.reg_dim[1]
                        if boo and bds != [] and not (set(bd.order) <= set(bds)):
                            logging.error('bond regularization dimension missed')
                            vio_obj.add((tuple(indc), tuple(bds), tuple(bd.order), typ))
                    elif typ == 'atomExt':
                        atypes = atms[indc[0]].reg_dim_atm[1]
                        atype = atms[indc[0]].atomtype
                        if boo and atypes != [] and not (set(atype) <= set(atypes)):
                            logging.error('atomtype regularization dimension missed')
                            vio_obj.add((tuple(indc), tuple(atypes), tuple(atype), typ))
                    elif typ == 'elExt':
                        us = atms[indc[0]].reg_dim_u[1]
                        u = atms[indc[0]].radical_electrons
                        if boo and us != [] and not (set(u) <= set(us)):
                            logging.error('unpaired electron regularization dimension missed')
                            vio_obj.add((tuple(indc), tuple(us), tuple(u), typ))
                    elif typ == 'ringExt':
                        rs = atms[indc[0]].reg_dim_r[1]
                        if 'inRing' in atms[indc[0]].props.keys():
                            r = atms[indc[0]].props['inRing']
                        else:
                            r = [True, False]
                        if boo and rs != [] and not (set(r) <= set(rs)):
                            logging.error('in ring regularization dimension missed')
                            vio_obj.add((tuple(indc), tuple(rs), tuple(r), typ))
                    else:
                        raise ValueError('extension type {0} not identified within test'.format(typ))
            self.assertTrue(len(vio_obj) <= 1,
                            'there were {0} regularization violations at, {1}'.format(len(vio_obj), vio_obj))

    def test_e_regularization_structure(self):
        """
        test that the tree is structured properly after regularization
        """
        self.family.clean_tree()
        self.family.generate_tree(thermo_database=self.thermoDatabase, rxns=self.treerxns)
        self.family.check_tree()
        self.family.regularize(thermo_database=self.thermoDatabase, rxns=self.treerxns)
        self.family.check_tree()
class TestGenerateReactions(unittest.TestCase):
    """Tests of reaction generation behavior for several kinetics families.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12) is
    replaced with ``assertEqual`` throughout.
    """

    @classmethod
    def setUpClass(cls):
        """A function run ONCE before all unit tests in this class."""
        # Set up a dummy database
        cls.database = RMGDatabase()
        cls.database.load(
            path=os.path.join(settings['test_data.directory'], 'testing_database'),
            thermo_libraries=[],
            reaction_libraries=[],
            kinetics_families=['H_Abstraction', 'R_Addition_MultipleBond', 'Singlet_Val6_to_triplet', 'R_Recombination',
                               'Baeyer-Villiger_step1_cat', 'Surface_Adsorption_Dissociative',
                               'Surface_Dissociation_vdW'],
            depository=False,
            solvation=False,
            testing=True,
        )
        cls.database.load_forbidden_structures()

    @classmethod
    def tearDownClass(cls):
        """A function run ONCE after all unit tests in this class."""
        import rmgpy.data.rmg
        rmgpy.data.rmg.database = None

    @mock.patch('rmgpy.data.kinetics.family.logging')
    def test_debug_forbidden_reverse_rxn(self, mock_logging):
        """Test that we can automatically debug when a reverse reaction is forbidden."""
        reactants = [Species().from_smiles('CC'), Species().from_smiles('[CH2]C=C[CH2]')]
        products = [Species().from_smiles('C[CH2]'), Species().from_smiles('[CH2]C=CC')]
        reaction = TemplateReaction(reactants=reactants, products=products)
        successful = self.database.kinetics.families['H_Abstraction'].add_reverse_attribute(reaction)
        self.assertFalse(successful)
        # Both diagnostic messages should have been logged.
        mock_logging.error.assert_has_calls([
            mock.call('Expecting one matching reverse reaction, not zero in reaction family H_Abstraction '
                      'for forward reaction CC + [CH2]C=C[CH2] <=> C[CH2] + [CH2]C=CC.\n'),
        ])
        mock_logging.error.assert_has_calls([
            mock.call('Error was fixed, the product is a forbidden structure when '
                      'used as a reactant in the reverse direction.'),
        ])

    def test_add_atom_labels_for_reaction(self):
        """Test that we can add atom labels to an existing reaction"""
        reactants = [Species().from_smiles('C=C'), Species().from_smiles('[OH]')]
        products = [Species().from_smiles('[CH2]CO')]
        reaction = TemplateReaction(reactants=reactants, products=products)
        self.database.kinetics.families['R_Addition_MultipleBond'].add_atom_labels_for_reaction(reaction)
        expected_reactants = [
            Molecule().from_adjacency_list("""
1 *1 C u0 p0 c0 {2,D} {3,S} {4,S}
2 *2 C u0 p0 c0 {1,D} {5,S} {6,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
5 H u0 p0 c0 {2,S}
6 H u0 p0 c0 {2,S}
"""),
            Molecule().from_adjacency_list("""
multiplicity 2
1 *3 O u1 p2 c0 {2,S}
2 H u0 p0 c0 {1,S}
""")]
        expected_products = [
            Molecule().from_adjacency_list("""
multiplicity 2
1 *1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}
2 *2 C u1 p0 c0 {1,S} {6,S} {7,S}
3 *3 O u0 p2 c0 {1,S} {8,S}
4 H u0 p0 c0 {1,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {2,S}
7 H u0 p0 c0 {2,S}
8 H u0 p0 c0 {3,S}
""")]
        # Compare with the labeled-atom mapping pinned, so the labels (not
        # just the skeleton) must match.
        for i, reactant in enumerate(reaction.reactants):
            mapping = {}
            for label, atom in expected_reactants[i].get_all_labeled_atoms().items():
                mapping[atom] = reactant.molecule[0].get_labeled_atoms(label)[0]
            self.assertTrue(expected_reactants[i].is_isomorphic(reactant.molecule[0], mapping))
        for i, product in enumerate(reaction.products):
            mapping = {}
            for label, atom in expected_products[i].get_all_labeled_atoms().items():
                mapping[atom] = product.molecule[0].get_labeled_atoms(label)[0]
            self.assertTrue(expected_products[i].is_isomorphic(product.molecule[0], mapping))

    def test_add_atom_labels_for_reaction_r_recombination(self):
        """Test that we can add atom labels to an existing R_Recombination reaction"""
        reactants = [Species().from_smiles('C[CH2]'), Species().from_smiles('[CH3]')]
        products = [Species().from_smiles('CCC')]
        reaction = TemplateReaction(reactants=reactants, products=products)
        self.database.kinetics.families['R_Recombination'].add_atom_labels_for_reaction(reaction)
        expected_reactants = [
            Molecule().from_adjacency_list("""
multiplicity 2
1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}
2 * C u1 p0 c0 {1,S} {6,S} {7,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {2,S}
7 H u0 p0 c0 {2,S}
"""),
            Molecule().from_adjacency_list("""
multiplicity 2
1 * C u1 p0 c0 {2,S} {3,S} {4,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
""")]
        expected_products = [
            Molecule().from_adjacency_list("""
1 * C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}
2 * C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}
3 C u0 p0 c0 {1,S} {9,S} {10,S} {11,S}
4 H u0 p0 c0 {1,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {2,S}
7 H u0 p0 c0 {2,S}
8 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {3,S}
10 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {3,S}
""")]
        for i, reactant in enumerate(reaction.reactants):
            mapping = {}
            for label, atom in expected_reactants[i].get_all_labeled_atoms().items():
                mapping[atom] = reactant.molecule[0].get_labeled_atoms(label)[0]
            self.assertTrue(expected_reactants[i].is_isomorphic(reactant.molecule[0], mapping))
        for i, product in enumerate(reaction.products):
            # There are two identical labels in the product, so we need to check both mappings
            # Only one of the mappings will result in isomorphic structures though
            atoms_a = expected_products[i].get_labeled_atoms('*')
            atoms_b = product.molecule[0].get_labeled_atoms('*')
            mapping1 = {atoms_a[0]: atoms_b[0], atoms_a[1]: atoms_b[1]}
            mapping2 = {atoms_a[0]: atoms_b[1], atoms_a[1]: atoms_b[0]}
            results = [
                expected_products[i].is_isomorphic(product.molecule[0], mapping1),
                expected_products[i].is_isomorphic(product.molecule[0], mapping2)
            ]
            self.assertTrue(any(results))
            self.assertFalse(all(results))

    def test_irreversible_reaction(self):
        """Test that the Singlet_Val6_to_triplet and 1,2-Birad_to_alkene families generate irreversible reactions."""
        reactant = [Molecule(smiles='O=O')]
        reaction_list = self.database.kinetics.families['Singlet_Val6_to_triplet'].generate_reactions(reactant)
        self.assertFalse(reaction_list[0].reversible)

    def test_net_charge_of_products(self):
        """Test that _generate_product_structures() does not generate charged products"""
        reactant = [Molecule(smiles='[NH-][NH2+]')]
        reaction_list = self.database.kinetics.families['R_Recombination'].generate_reactions(reactant)
        for rxn in reaction_list:
            for product in rxn.products:
                self.assertEqual(product.get_net_charge(), 0)
        reactant = [Molecule(smiles='[O-][N+]#N')]
        reaction_list = self.database.kinetics.families['R_Recombination'].generate_reactions(reactant)
        self.assertEqual(len(reaction_list), 0)

    def test_reactant_num_mismatch(self):
        """Test that we get no reactions for reactant/template size mismatch

        This happens often because we test every combo of molecules against all families."""
        reactants = [Molecule(smiles='C'), Molecule(smiles='[OH]')]
        reaction_list = self.database.kinetics.families['Singlet_Val6_to_triplet'].generate_reactions(reactants)
        self.assertEqual(len(reaction_list), 0)
        reaction_list = self.database.kinetics.families['Baeyer-Villiger_step1_cat'].generate_reactions(reactants)
        self.assertEqual(len(reaction_list), 0)
        reaction_list = self.database.kinetics.families['Surface_Adsorption_Dissociative'].generate_reactions(reactants)
        self.assertEqual(len(reaction_list), 0)

    def test_reactant_num_mismatch_2(self):
        """Test that we get no reactions for reactant/template size mismatch

        This happens often because we test every combo of molecules against all families."""
        reactants = [
            Molecule().from_smiles('CC'),
            Molecule().from_adjacency_list('1 X u0'),
            Molecule().from_adjacency_list('1 X u0'),
        ]
        # reaction_list = self.database.kinetics.families['Surface_Adsorption_Dissociative'].generate_reactions(reactants)
        # self.assertEquals(len(reaction_list), 14)
        reaction_list = self.database.kinetics.families['Surface_Dissociation_vdW'].generate_reactions(reactants)
        self.assertEqual(len(reaction_list), 0)
|
# -*- coding: utf-8 -*-
import pygame
from graphic.graphics import Background, Graphics
class Game:
    """Minimal game shell: owns the graphics backend and runs the main loop."""

    def __init__(self):
        self._graphics = Graphics()
        self._playing = False

    def start(self):
        """Draw the background and run the main loop until a QUIT event."""
        background = Background(self._graphics)
        background.draw()
        self._playing = True
        while self._playing:
            # Bug fix: refresh once per frame, not once per queued event --
            # the original called update() inside the event loop, so nothing
            # refreshed while the event queue was empty and several updates
            # ran when multiple events arrived in the same frame.
            self._graphics.update()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.end_game()
                    break

    def end_game(self):
        """Stop the main loop on its next iteration."""
        self._playing = False
|
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from django.shortcuts import redirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from config import utility
from learn_word import forms
from learn_word import models
import random
import logging
logger = logging.getLogger("app")
class WordList(LoginRequiredMixin, APIView):
    """Paginated list (100/page) of the current user's English words."""

    renderer_classes = [TemplateHTMLRenderer]

    def get(self, request):
        """Render one page of words owned by the requesting user."""
        words = models.EnglishWord.objects.filter(created_by_id=request.user.id)
        paginator = Paginator(words, 100)
        requested_page = request.GET.get('page', 1)
        try:
            page_obj = paginator.page(requested_page)
        except PageNotAnInteger:
            # Non-numeric page parameter: fall back to the first page.
            page_obj = paginator.page(1)
        except EmptyPage:
            # Out-of-range page number: clamp to the last page.
            page_obj = paginator.page(paginator.num_pages)
        logger.debug(page_obj)
        return Response({'page_obj': page_obj}, template_name='word/list.html')

    def post(self, request):
        """POST is not used on this view; bounce back to the dashboard."""
        return redirect('/account/dashboard')
class Star(LoginRequiredMixin, APIView):
    """Set/clear the 'starred' (is_checked) flag on one WordSummary row."""

    def post(self, request):
        """Update is_checked for the summary owned by the current user."""
        current_user = request.user
        logger.info(current_user.id)
        is_checked = request.data.get('is_checked')
        word_summary_id = request.data.get('word_summary_id')
        logger.debug(f'word_summary_id:{word_summary_id} is_checked:{is_checked}')
        summary = models.WordSummary.objects.filter(pk=word_summary_id, user_id=current_user.id).first()
        if summary is None:
            logger.exception("お気に入り登録失敗:一致するデータがありません")
            return Response(data={'error_message': "一致するデータがありません"}, status=status.HTTP_400_BAD_REQUEST)
        summary.is_checked = is_checked
        summary.save()
        return Response(data={'is_checked': request.data.get('is_checked')}, status=status.HTTP_200_OK)
class AnswerWord(LoginRequiredMixin, APIView):
    """Record whether the user knew a word for one WordLog entry."""

    def post(self, request):
        """Mark the log entry known/unknown depending on the posted flag."""
        current_user = request.user
        logger.info(current_user.id)
        word_log_id = request.data.get('word_log_id')
        is_unknown = request.data.get('is_unknown')
        logger.debug(f'word_log_id:{word_log_id} is_unknown: {is_unknown}')
        log_entry = models.WordLog.objects.filter(pk=word_log_id, user_id=current_user.id).first()
        if log_entry is None:
            logger.exception("わからない単語登録失敗:一致するデータがありません")
            return Response(data={'error_message': "一致するデータがありません"}, status=status.HTTP_400_BAD_REQUEST)
        # Dispatch to the model helper that matches the user's answer.
        if is_unknown:
            log_entry.mark_unknown()
        else:
            log_entry.mark_known()
        return Response(status=status.HTTP_200_OK)
class Setting(LoginRequiredMixin, APIView):
    """Show and update the user's word-learning settings."""

    renderer_classes = [TemplateHTMLRenderer]

    def get(self, request):
        """Render the settings form pre-filled with the stored values."""
        stored = models.WordLearnSetting.find_by_user_id(request.user.id)
        form = forms.SettingForm(stored.__dict__)
        return Response(data={'form': form}, template_name='word/setting.html')

    def post(self, request):
        """Validate and persist the submitted settings."""
        stored = models.WordLearnSetting.find_by_user_id(request.user.id)
        logger.debug(request.POST)
        form = forms.SettingForm(request.POST, instance=stored)
        if not form.is_valid():
            logger.debug(f'validate error: {form}')
            # Re-render the form with its validation errors.
            return Response(data={'form': form}, template_name='word/setting.html')
        form.save()
        return redirect('/account/dashboard')
@login_required
def category(request):
    """Render one level of the word-category tree.

    With a ``category_id`` query parameter, the children of that category are
    listed; without one, the top-level categories are shown under a synthetic
    'All categories' root.
    """
    category_id = request.GET.get('category_id')
    path = '/'
    if category_id:
        this_category = models.WordCategory.objects.get(pk=category_id)
        path = f'{this_category.path}{this_category.id}/'
    else:
        # Synthetic, unsaved root used only for display.
        this_category = models.WordCategory(name='All categories')
    categories = models.WordCategory.get_category(path=path)
    # Fix: the loop variable was named 'category', shadowing this view
    # function itself inside the loop.
    for child in categories:
        child.visible = child.has_word_relations()
    data = {'categories': categories, 'this_category': this_category}
    return render(request, template_name='word/category.html', context=data)
@login_required
def show(request, word_id):
    """Display a single word read-only (only_show=True; no answer logging)."""
    user_id = request.user.id
    setting = models.WordLearnSetting.find_by_user_id(user_id)
    study_word = models.EnglishWord.objects.get(pk=word_id)
    word_summary = models.WordSummary.find_one(user_id=user_id, english_word_id=study_word.id)
    defines = models.Define.objects.filter(english_word_id=study_word.id)
    # Attach synonyms to each definition for the template.
    for definition in defines:
        definition.synonyms = definition.get_synonyms()
    context = {
        'study_word': study_word,
        "word_summary": word_summary,
        "defines": defines,
        "setting": setting,
        "only_show": True,
    }
    return render(request, template_name='word/learn.html', context=context)
@login_required
def learn(request):
    """Serve one word of a learning session and create its WordLog entry.

    The session's word list is cached for 24h per user/checked-filter/category
    so successive ``index`` values page through a stable sequence; when the
    index runs past the end, the user is redirected to the result page.
    """
    index = utility.str_to_int(request.GET.get("index"))
    # NOTE(review): str_to_integer here vs str_to_int above -- confirm both
    # helpers actually exist in config.utility.
    category_id = utility.str_to_integer(request.GET.get("category_id"))
    visible_checked = utility.convert_to_bool(request.GET.get("visible_checked"))
    # Normalize to 0/1 so the value is stable in the cache key and URL.
    if visible_checked:
        visible_checked = 1
    else:
        visible_checked = 0
    user = request.user
    setting = models.WordLearnSetting.find_by_user_id(user.id)
    limit = setting.learn_num
    key = f'study_words_user_{user.id}_checked_{visible_checked}_category_{category_id}'
    # index == 0 marks the start of a new session: discard any cached list.
    if index == 0:
        cache.delete(key)
    study_words = cache.get(key)
    if(not study_words):
        study_words = models.show_study_words(
            user_id=user.id,
            category_id=category_id,
            limit=limit,
            is_checked=visible_checked,
            is_random=setting.is_random
        )
        study_words = list(study_words)
        if setting.is_shuffle:
            random.shuffle(study_words)
        # Cache for 24 hours so the whole session sees the same order.
        cache.set(key, study_words, timeout=60*60*24)
        logger.info("get study_words")
    word_count = len(study_words)
    # Past the end of the list: the session is over, go to the result page.
    if(word_count < index+1):
        cache.delete(key)
        url = f'/word/learn/result?limit={word_count}'
        if category_id:
            url += f'&category_id={category_id}'
        if visible_checked:
            url += f'&visible_checked={visible_checked}'
        return redirect(url)
    study_word = study_words[index]
    # Create the log row now; the client reports the answer against its id.
    word_log = models.WordLog.create(user_id=user.id, english_word_id=study_word.id)
    word_summary = models.WordSummary.find_one(user_id=user.id, english_word_id=study_word.id)
    defines = models.Define.objects.filter(english_word_id=study_word.id)
    for define in defines:
        define.synonyms = define.get_synonyms()
    logger.debug(f"defines:{defines}")
    data = {
        'study_word': study_word,
        "word_summary": word_summary,
        "word_log": word_log,
        "index": index,
        "category_id": category_id,
        "word_count": word_count,
        "visible_checked": visible_checked,
        "defines": defines,
        "setting": setting,
        "only_show": False,
    }
    return render(request, template_name='word/learn.html', context=data)
@login_required
def learn_result(request):
    """Summarize the session just finished: per-word results plus a score."""
    user = request.user
    limit = int(request.GET.get("limit", 0))
    word_logs = models.WordLog.get_learn_result(user_id=user.id, limit=limit)
    if word_logs:
        known = len([entry for entry in word_logs if entry.is_unknown is False])
        rate = known / len(word_logs) * 100
    else:
        # No answers recorded: show a perfect score rather than divide by 0.
        rate = 100
    # Decorate each log entry with its summary statistics for the template.
    for entry in word_logs:
        summary = models.WordSummary.find_one(
            user_id=user.id,
            english_word_id=entry.english_word.id,
        )
        entry.display_count = summary.display_count
        entry.display_order = summary.order
        entry.is_checked = summary.is_checked
        entry.summary_id = summary.id
    category_id = request.GET.get("category_id", 'None')
    visible_checked = request.GET.get("visible_checked", 0)
    learn_url = '/word/learn?continue=1'
    if category_id != 'None':
        learn_url += f'&category_id={category_id}'
    if visible_checked:
        learn_url += f'&visible_checked={visible_checked}'
    context = {
        'category_id': category_id,
        'learn_url': learn_url,
        'word_logs': word_logs,
        'rate': rate,
    }
    return render(request, template_name='word/learn_result.html', context=context)
|
def ObjectType(properties, on_dressed=None):
    """Class-decorator factory adding mapping<->object conversion helpers.

    properties: iterable of attribute names (``None`` entries are skipped).
    on_dressed: optional callback invoked with each instance built by
        ``info`` (e.g. for post-construction setup).

    The decorated class gains:
      * ``info(frm)`` -- classmethod building an instance from mapping *frm*,
        passing ``frm[p]`` for each property as positional constructor args.
      * ``to_info()`` -- method returning ``{p: getattr(self, p)}``.
    """
    def decorate(base):
        def info(cls, frm):
            args = [frm[p] for p in properties if p is not None]
            instance = cls(*args)
            if on_dressed:
                on_dressed(instance)
            return instance

        def to_info(self):
            return {k: getattr(self, k) for k in properties if k is not None}

        base.info = classmethod(info)
        # Fix: bind the function directly instead of wrapping it in the
        # redundant ``lambda frm: to_info(frm)``.
        base.to_info = to_info
        return base
    return decorate
def TypeType(generator, properties):
    """Like ObjectType, but also records *generator* on the class
    (as ``_generator``) before applying the ObjectType helpers."""
    def decorate(base):
        base._generator = generator
        decorated = ObjectType(properties)(base)
        return decorated
    return decorate
|
from client import client
from datetime import datetime
import discord
import os
import socket
import threading
import time
try:
import psutil
except ModuleNotFoundError:
has_psutil = False
else:
has_psutil = True
# Name under which this command module registers itself.
cmd_name = "stats"

# One-line summary shown in the bot's basic help listing.
client.basic_help(title=cmd_name, desc=f"shows various running statistics of {client.bot_name}")

# Detailed help page for this command.
detailed_help = {
    "Usage": f"{client.default_prefix}{cmd_name}",
    "Description": f"This command shows different available statistics of {client.bot_name}, including servers, uptime, and commands run.",
    "Related": f"`{client.default_prefix} info` - shows information about {client.bot_name}",
}
client.long_help(cmd=cmd_name, mapping=detailed_help)
@client.ready
async def readier():
    """Prepare the background psutil sampling thread once the client is ready."""
    def psutil_update_thread_loop(client):
        # Prime psutil's per-CPU counters every 5 seconds so later
        # cpu_percent() calls return meaningful deltas.
        while client.active:
            # self_process.cpu_percent() # not sure how to optimize this loop in another thread so we're going to
            # comment it out and deal with it for now
            psutil.cpu_percent(percpu=True)
            time.sleep(5)

    global psutil_update_thread
    # NOTE(review): the thread is created but never start()ed anywhere in
    # this block -- confirm whether starting it here was intended.
    psutil_update_thread = threading.Thread(target=psutil_update_thread_loop, name="PSUtil_Background_Loop", args=[client])
    return
@client.command(trigger=cmd_name, aliases=["statistics", "s"])
async def statistics(command: str, message: discord.Message):
    """Build and send an embed of runtime statistics.

    Flags parsed out of the raw command string:
      --hostname  include the host machine's name in the embed
      --uptime    short-circuit and send only the uptime
    """
    include_hostname = "--hostname" in command
    if "--uptime" in command:
        up = time.perf_counter() - client.first_execution
        await message.channel.send(f"Uptime:\n`{up:.3f}` seconds\n`{up/86400:.4f}` days")
        return
    async with message.channel.typing():
        if has_psutil:
            try:
                temp = psutil.sensors_temperatures()['cpu-thermal'][0].current
            except (AttributeError, KeyError):
                # Sensor not available on this platform.
                temp = None
            # Renamed from 'self' (shadowed the builtin-style name).
            proc = psutil.Process()
            cpu_self = proc.cpu_percent(interval=1)
            self_m_used = proc.memory_info().rss
            m_raw = psutil.virtual_memory()
            m_total = m_raw.total
            m_available = m_raw.available
            m_used = m_total - m_available
            cpu = psutil.cpu_percent(percpu=True)
            cpu_text = ""
            for index, v in enumerate(cpu):
                cpu_text += f"**CPU {index}:** {v}%\n"
        embed = discord.Embed(title=f"{client.bot_name} stats", description=discord.Embed.Empty, color=0x404040)
        up = time.perf_counter() - client.first_execution
        embed = embed.add_field(name="Uptime", value=f"{up:.3f} seconds\n{up/86400:.4f} days")
        embed = embed.add_field(name="Servers", value=len(client.guilds))
        embed = embed.add_field(name="Total commands run in all servers since last reboot", value=client.command_count, inline=False)
        # Bug fix: guard against ZeroDivisionError when no messages have been
        # counted yet (e.g. right after startup).
        if client.message_count:
            mps = client.message_count / up
            msg_freq = up / client.message_count
            msg_value = f"{client.message_count} ({mps:.4f}/sec) ({msg_freq:.4f} sec/message)"
        else:
            msg_value = "0"
        embed = embed.add_field(name="Total messages sent in all servers since last reboot", value=msg_value, inline=False)
        n_connected = len(client.voice_clients)
        n_playing = len([x for x in client.voice_clients if x.is_playing()])
        embed = embed.add_field(name="Connected voice chats", value=f"{n_connected} ({n_playing} playing)")
        embed = embed.add_field(name="Bot Process ID", value=os.getpid())
        if include_hostname: embed = embed.add_field(name="Host Machine Name", value=socket.gethostname())
        if has_psutil:
            embed = embed.add_field(name="Host CPU temperature", value=f"{int(temp) if temp is not None else 'Unknown'}")
            embed = embed.add_field(name="Process Memory Usage", value=f"{self_m_used/(1024*1024):.3f} MiB")
            embed = embed.add_field(name="Process CPU Usage (relative to one core)", value=f"{cpu_self:.1f}%")
            embed = embed.add_field(name="System RAM Usage", value=f"{m_used/(1024*1024):.1f}/{m_total/(1024*1024):.1f} MiB ({(m_used/m_total)*100:.2f}%)")
            embed = embed.add_field(name="System CPU Usage", value=cpu_text, inline=False)
        embed = embed.set_footer(text=datetime.utcnow().__str__())
        await message.channel.send(embed=embed)
|
#!/usr/bin/python
#
# Copyright 2018 Kaggle Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kaggle.api.kaggle_api_extended import KaggleApi
# python -m unittest tests.test_authenticate
import os
import unittest
class TestAuthenticate(unittest.TestCase):
    """Exercises KaggleApi credential handling and config helpers."""

    def setUp(self):
        print("setup class:%s" % self)

    def tearDown(self):
        print("teardown class:TestStuff")

    # Environment
    def test_environment_variables(self):
        """Credentials should be picked up from the environment by authenticate()."""
        os.environ['KAGGLE_USERNAME'] = 'dinosaur'
        os.environ['KAGGLE_KEY'] = 'xxxxxxxxxxxx'
        api = KaggleApi()
        # Nothing should be loaded before authenticate() runs.
        self.assertNotIn("key", api.config_values)
        self.assertNotIn("username", api.config_values)
        api.authenticate()
        # Now both values come from the environment.
        self.assertEqual(api.config_values['key'], 'xxxxxxxxxxxx')
        self.assertEqual(api.config_values['username'], 'dinosaur')

    # Configuration Actions
    def test_config_actions(self):
        """Default config dir and missing-key lookups behave as expected."""
        api = KaggleApi()
        self.assertTrue(api.config_dir.endswith('.kaggle'))
        self.assertIsNone(api.get_config_value('doesntexist'))
# Allow running this test module directly (python -m unittest also works).
if __name__ == '__main__':
    unittest.main()
|
class Bandwidth:
    """Aggregates per-host daily byte counts from access-log fields and
    upserts them into the ``log`` table, keyed by store.

    Fixes over the original:
      * ``hosts``/``storeId`` were mutable *class* attributes silently shared
        by every instance; they are now per-instance state set in __init__.
      * All SQL is parameterized instead of string-interpolated (the original
        built queries with ``%`` formatting, an injection risk).
      * ``dict.has_key`` and the Python 2 ``print`` statement were replaced
        with forms that work on both Python 2 and 3.
    """

    # Month abbreviation -> zero-padded month number, for log-date parsing.
    months = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05', 'Jun': '06',
              'Jul': '07', 'Aug': '08', 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}

    def __init__(self):
        # host -> {'YYYY-MM-DD' -> total bytes}
        self.hosts = {}
        # domain -> store id cache, filled lazily by getStoreId().
        self.storeId = {}

    def add(self, host, bytes, date):
        """Accumulate *bytes* (string or int) for *host* on the day in *date*."""
        newDate = self.parseDate(date)
        day_totals = self.hosts.setdefault(host, {})
        day_totals[newDate] = day_totals.get(newDate, 0) + int(bytes)

    def parseDate(self, date):
        """Convert a '[DD/Mon/YYYY:HH:MM:SS ...]' log timestamp to 'YYYY-MM-DD'."""
        date = date[1:-1]  # strip the surrounding brackets
        return '-'.join([date[7:11], self.months[date[3:6]], date[0:2]])

    def getStoreId(self, domain):
        """Return (and cache) the store id for *domain*; raise if unknown."""
        if domain not in self.storeId:
            cursor = self.connection.cursor()
            cursor.execute("SELECT id FROM store WHERE domain = %s", (domain,))
            row = cursor.fetchone()
            if row is None:
                raise Exception("Store %s does not exists." % domain)
            self.storeId[domain] = row[0]
        return self.storeId[domain]

    def persist(self, connection):
        """Upsert all accumulated counts into the ``log`` table, then commit
        and close *connection*."""
        self.connection = connection
        print("Persisting to database")
        for (host, items) in self.hosts.items():
            for (date, bytes) in items.items():
                storeId = self.getStoreId(host)
                cursor = self.connection.cursor()
                cursor.execute("SELECT store_id, date, bytes FROM log where store_id = %s AND date = %s",
                               (storeId, date))
                row = cursor.fetchone()
                cursor = self.connection.cursor()
                if row is None:
                    cursor.execute("INSERT INTO log (store_id, date, bytes) VALUES (%s, %s, %s)",
                                   (storeId, date, bytes))
                else:
                    cursor.execute("UPDATE log SET bytes = bytes + %s WHERE store_id = %s AND date = %s",
                                   (bytes, storeId, date))
        self.connection.commit()
        self.connection.close()
# Backwards-compatibility shim: this module was renamed in 0.15.0.
from ....utils.code_utils import deprecate_module

# Emit a deprecation warning pointing users at the new module name.
deprecate_module("ediFilesUtils", "edi_files_utils", "0.15.0")

from .edi_files_utils import *
|
#!/usr/bin/env python3
"""
Contains the interactions for GitHub webhooks
"""
import secrets
from fastapi import FastAPI, Depends, Request, Response, HTTPException, status
from fastapi.responses import FileResponse, HTMLResponse
from fastapi.security import HTTPBasic, HTTPBasicCredentials
import uvicorn
from ubiquiti_config_generator import file_paths
from ubiquiti_config_generator.github import api, checks, push, deployment
from ubiquiti_config_generator.messages import db
from ubiquiti_config_generator.web import page
# Application instance served by uvicorn (see run_listener in this module).
app = FastAPI(
    title="Ubiquiti Configuration Webhook Listener",
    description="Listens to GitHub webhooks to "
    "trigger actions on Ubiquiti configurations",
    version="1.0",
)
# HTTP Basic auth scheme used by the status pages.
security = HTTPBasic()
def authenticate(credentials: HTTPBasicCredentials = Depends(security)):
    """Validate HTTP Basic credentials against deploy.yaml's logging section.

    Returns the username on success; raises 401 with a Basic challenge
    otherwise.
    """
    logging_config = file_paths.load_yaml_from_file("deploy.yaml")["logging"]
    # compare_digest gives constant-time comparison for both fields.
    user_ok = secrets.compare_digest(credentials.username, logging_config["user"])
    pass_ok = secrets.compare_digest(credentials.password, logging_config["pass"])
    if user_ok and pass_ok:
        return credentials.username
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Incorrect username or password",
        headers={"WWW-Authenticate": "Basic"},
    )
@app.post("/")
async def on_webhook_action(request: Request) -> Response:
    """
    Runs for each webhook action.

    Fix: the result of process_request was silently discarded even though
    this handler is annotated ``-> Response``; it is now forwarded.
    """
    body = await request.body()
    form = await request.json()
    return process_request(request.headers, body, form)
@app.get("/background.png")
async def background():
    """Serve the static background image."""
    image_path = "web/background.png"
    return FileResponse(image_path)
@app.get("/main.css")
async def css():
    """Serve the site stylesheet."""
    stylesheet_path = "web/main.css"
    return FileResponse(stylesheet_path)
# pylint: disable=unused-argument
@app.get("/checks/{revision}", response_class=HTMLResponse)
async def check_status(revision: str, username: str = Depends(authenticate)):
    """Serve the HTML log page for one check run (requires basic auth)."""
    page_html = render_check(revision)
    return page_html
# pylint: disable=unused-argument
@app.get("/deployments/{revision1}/{revision2}", response_class=HTMLResponse)
async def deployment_status(
    revision1: str, revision2: str, username: str = Depends(authenticate)
):
    """Serve the HTML log page for one deployment (requires basic auth)."""
    page_html = render_deployment(revision1, revision2)
    return page_html
def render_check(revision: str) -> str:
    """Build the HTML status page for the check at *revision*."""
    details = db.get_check(revision)
    page_data = {
        "type": "check",
        "status": details.status,
        "started": details.started_at,
        "ended": details.ended_at,
        "revision1": revision,
        "logs": details.logs,
    }
    return page.generate_page(page_data)
def render_deployment(revision1: str, revision2: str) -> str:
    """Build the HTML status page for the deployment revision1 -> revision2."""
    details = db.get_deployment(revision1, revision2)
    page_data = {
        "type": "deployment",
        "status": details.status,
        "started": details.started_at,
        "ended": details.ended_at,
        "revision1": revision1,
        "revision2": revision2,
        "logs": details.logs,
    }
    return page.generate_page(page_data)
def process_request(headers: dict, body: str, form: dict) -> None:
    """
    Perform the actual processing of a request.

    Validates the GitHub webhook signature, then dispatches the event to the
    matching handler. Raises HTTPException(404) for an invalid signature.
    (Return annotation fixed: this function never returns a Response.)
    """
    deploy_config = file_paths.load_yaml_from_file("deploy.yaml")
    if not api.validate_message(deploy_config, body, headers["x-hub-signature-256"]):
        print("Unauthorized request!")
        # NOTE: 404 (rather than 401/403) is kept for compatibility -- it
        # avoids advertising the endpoint to unauthenticated probes.
        raise HTTPException(status_code=404, detail="Invalid body hash")
    access_token = api.get_access_token(api.get_jwt(deploy_config))
    event = headers["x-github-event"]
    print(
        "Got event {0} with action {1}".format(
            event, form.get("action", "")
        )
    )
    if event == "check_suite":
        checks.handle_check_suite(form, access_token)
    elif event == "check_run":
        checks.process_check_run(deploy_config, form, access_token)
    elif event == "push":
        push.check_push_for_deployment(deploy_config, form, access_token)
    elif event == "deployment":
        deployment.handle_deployment(form, deploy_config, access_token)
    else:
        print("Skipping event - no handler registered!")
def run_listener():
    """Start uvicorn serving this module's app on the configured port."""
    deploy_config = file_paths.load_yaml_from_file("deploy.yaml")
    webhook_port = deploy_config["git"]["webhook-port"]
    uvicorn.run("webhook_listener:app", port=webhook_port, reload=True)
# Script entry point: start the webhook listener.
if __name__ == "__main__":
    run_listener()
|
from .router import Router
from .parser import Parser
|
import os
import tempfile
def format_dict(d):
    """Render a mapping as 'k1=v1, k2=v2' in insertion order."""
    return ', '.join(f'{key}={value}' for key, value in d.items())
def change_ext(filename, ext):
    """Replace *filename*'s extension with *ext*; '' removes it entirely."""
    stem = os.path.splitext(filename)[0]
    if ext == '':
        return stem
    return stem + '.' + ext
def url_basename(url, *, _d={}):
    """Return the final path segment of *url* ('' if the path ends in '/').

    ``_d`` is retained only for signature compatibility: it was a hand-rolled
    regex cache via a mutable default argument, and is now ignored. The old
    pattern r'([^/]*)$' is exactly "everything after the last '/'".
    """
    from urllib.parse import urlparse
    return urlparse(url).path.rsplit('/', 1)[-1]
class TmpFile:
    """Context manager yielding the path of a freshly created temporary file.

    Unlike tempfile.NamedTemporaryFile, the OS-level handle is closed right
    away so the path can be reopened by other code.
    """

    def __init__(self, suffix=None, prefix=None, dir=None):
        # Passed straight through to tempfile.mkstemp in __enter__.
        self._suffix = suffix
        self._prefix = prefix
        self._dir = dir

    def __enter__(self):
        fd, filename = tempfile.mkstemp(self._suffix, self._prefix, self._dir)
        os.close(fd)  # only the path is needed; release the descriptor
        self._filename = filename
        return filename

    def __exit__(self, exc_type, exc_value, traceback):
        # os.remove(self._filename)
        # NOTE(review): cleanup is disabled, so the temp file outlives the
        # context -- confirm whether callers rely on that before re-enabling.
        pass
def get_latest_app_version():
    """Return the newest released version of dl-coursera published on PyPI.

    Bug fix: the original sorted version strings lexicographically, which
    orders '0.10.0' before '0.9.0'; components are now compared numerically.
    """
    import requests

    def _version_key(version):
        # Non-numeric components (e.g. 'rc1') sort before any numeric one;
        # adequate for picking the latest plain X.Y.Z release.
        return [int(part) if part.isdigit() else -1 for part in version.split('.')]

    resp = requests.get('https://pypi.org/pypi/dl-coursera/json')
    d = resp.json()
    return max(d['releases'].keys(), key=_version_key)
|
# import
from sklearn.metrics import f1_score
import numpy as np
import matplotlib.pyplot as plt
import json
import pandas as pd
import torch
import os
from tqdm import tqdm
from data_generator import Dataset_train, Dataset_test
from metrics import Metric
from postprocessing import PostProcessing
def seed_everything(seed):
    """Seed PYTHONHASHSEED, numpy and torch for reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)


seed_everything(42)
class CVPipeline:
    """Cross-validation training pipeline.

    Loads fold split tables, trains the model on the fold given by
    hparams['start_fold'], post-processes and scores the validation
    predictions, saves the model, and dumps per-record debug output.
    """

    def __init__(self, hparams, split_table_path, split_table_name, debug_folder, model, gpu, downsample):
        """Store configuration; the heavy lifting happens in train().

        hparams: dict of hyper-parameters (lr, start_fold, model_path, ...).
        split_table_path/name: location of the '<fold>_<name>' JSON splits.
        model: model *class*, instantiated lazily in train().
        """
        self.hparams = hparams
        self.model = model
        self.gpu = gpu
        self.downsample = downsample
        print('\n')
        print('Selected Learning rate:', self.hparams['lr'])
        print('\n')
        self.debug_folder = debug_folder
        self.split_table_path = split_table_path
        self.split_table_name = split_table_name
        # Records excluded from every fold.
        # Bug fix: all entries after the first two were missing commas, so
        # Python concatenated the adjacent string literals into one long
        # bogus ID and the exclusions never matched anything.
        self.exclusions = [
            'S0431',
            'S0326',
            'S0453',
            'S0458',
            'A5766',
            'A0227',
            'A0238',
            'A1516',
            'A5179',
            'Q1807',
            'Q3568',
            'E10256',
            'E07341',
            'E05758',
        ]
        self.splits = self.load_split_table()
        self.metric = Metric()

    def load_split_table(self):
        """Read each '<i>_<name>' fold file and return a DataFrame with one
        row per fold holding the filtered 'train'/'val' record lists."""
        splits = []
        split_files = [f for f in os.listdir(self.split_table_path) if f.find('fold') != -1]
        for fold_index in range(len(split_files)):
            # Close the file handle promptly (the original leaked it).
            with open(self.split_table_path + str(fold_index) + '_' + self.split_table_name) as fh:
                data = json.load(fh)
            # Normalize Windows-style paths down to the bare record name,
            # then apply the exclusion/dataset filters.
            data['train'] = self._filter_records([p.split('\\')[-1] for p in data['train']])
            data['val'] = self._filter_records([p.split('\\')[-1] for p in data['val']])
            splits.append(data)
        return pd.DataFrame(splits)

    def _filter_records(self, records):
        """Drop excluded records; keep only the Q/S/A/H/E datasets."""
        return [
            r for r in records
            if r not in self.exclusions and r[0] in ('Q', 'S', 'A', 'H', 'E')
        ]

    def train(self):
        """Train on the fold selected by hparams['start_fold'] and return its
        post-processed validation score (0 if no fold matched)."""
        fold_score = 0  # fix: was undefined when start_fold was out of range
        for fold in range(self.splits.shape[0]):
            # Only the configured fold is trained by this pipeline.
            if fold != self.hparams['start_fold']:
                continue
            train_ds = Dataset_train(self.splits['train'].values[fold], aug=False, downsample=self.downsample)
            valid_ds = Dataset_train(self.splits['val'].values[fold], aug=False, downsample=self.downsample)
            # Infer input shapes from one sample, then build the model.
            X, y = train_ds.__getitem__(0)
            self.model = self.model(
                input_size=X.shape[0], n_channels=X.shape[1], hparams=self.hparams, gpu=self.gpu
            )
            # Train, then predict on the validation fold and post-process.
            self.model.fit(train=train_ds, valid=valid_ds)
            y_val, pred_val = self.model.predict(valid_ds)
            self.postprocessing = PostProcessing(fold=self.hparams['start_fold'])
            pred_val_processed = self.postprocessing.run(pred_val)
            # TODO: add activations
            # heatmap = self.model.get_heatmap(valid_ds)
            fold_score = self.metric.compute(y_val, pred_val_processed)
            print("Model's final score: ", fold_score)  # fix: typo 'scrore'
            # Save the model with the score embedded in the file name.
            self.model.model_save(
                self.hparams['model_path']
                + self.hparams['model_name'] + f"_{self.hparams['start_fold']}"
                + '_fold_'
                + str(fold_score)
                + '.pt'
            )
            # Dump raw (un-post-processed) predictions for offline debugging.
            self.save_debug_data(pred_val, self.splits['val'].values[fold])
        return fold_score

    def save_debug_data(self, pred_val, validation_list):
        """Write each record's predicted labels to ./data/CV_debug/<dataset>/."""
        # Record-ID prefix -> debug sub-folder.
        folder_by_prefix = {'A': 'A', 'Q': 'B', 'I': 'C', 'S': 'D', 'H': 'E', 'E': 'F'}
        for index, record in enumerate(validation_list):
            try:
                subfolder = folder_by_prefix[record[0]]
            except KeyError:
                # Bug fix: an unknown prefix used to silently reuse the
                # previous record's folder (or crash on the first record).
                raise ValueError('Unknown record prefix: %s' % record)
            data_folder = f'./data/CV_debug/{subfolder}/'
            prediction = {'predicted_label': pred_val[index].tolist()}
            with open(data_folder + record + '.json', 'w') as outfile:
                json.dump(prediction, outfile)
        return True
|
import requests
from build_assets import arg_getters, api_handler, util
import re
def main():
    """Assemble and print a release message from merged pull requests."""
    try:
        print("Please wait a few seconds...")
        args = arg_getters.get_release_message_args()
        # fetch first page by default
        pull_requests = api_handler.get_merged_pull_reqs_since_last_release(args.token)
        new_icons = []
        features = []
        print("Parsing through the pull requests")
        for pull_request in pull_requests:
            authors = api_handler.find_all_authors(pull_request, args.token)
            entry = f"- [{pull_request['title']}]({pull_request['html_url']}) by {authors}."
            # Icon PRs and feature PRs go into separate sections.
            if api_handler.is_feature_icon(pull_request):
                new_icons.append(entry)
            else:
                features.append(entry)
        print("Constructing message")
        thank_you = "A huge thanks to all our maintainers and contributors for making this release possible!"
        icon_title = f"**{len(new_icons)} New Icons**"
        feature_title = f"**{len(features)} New Features**"
        message = "{0}\n\n {1}\n{2}\n\n {3}\n{4}".format(
            thank_you, icon_title, "\n".join(new_icons), feature_title, "\n".join(features))
        print("--------------Here is the build message--------------\n", message)
        print("Script finished")
    except Exception as e:
        # Any failure ends the script with a reported error.
        util.exit_with_err(e)
# Script entry point.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
######################################################################
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
# @file source/decoders/state.py
#
######################################################################
class DecoderState(object):
    """
    State of Decoder.

    Holds the recurrent hidden state plus any number of auxiliary tensors
    (passed via **kwargs and stored as attributes). Convention used by every
    method below: ``hidden`` carries the batch on dim 1 (num_layers, batch,
    hidden_size), while every other tensor carries the batch on dim 0 --
    hence the repeated special-casing of the "hidden" key.
    """

    def __init__(self, hidden=None, **kwargs):
        """
        hidden: Tensor(num_layers, batch_size, hidden_size)
        """
        if hidden is not None:
            self.hidden = hidden
        # Store any extra state tensors as attributes, skipping Nones.
        for k, v in kwargs.items():
            if v is not None:
                self.__setattr__(k, v)

    def __getattr__(self, name):
        # Missing attributes resolve to None instead of raising
        # AttributeError (this hook only fires when normal lookup fails).
        return self.__dict__.get(name)

    def get_batch_size(self):
        # Batch size lives on dim 1 of hidden, or dim 0 of any other tensor.
        if self.hidden is not None:
            return self.hidden.size(1)
        else:
            return next(iter(self.__dict__.values())).size(0)

    def size(self):
        # Map each stored attribute name to its tensor's size.
        sizes = {k: v.size() for k, v in self.__dict__.items()}
        return sizes

    def slice_select(self, stop):
        # New state keeping only the first `stop` examples of the batch.
        kwargs = {}
        for k, v in self.__dict__.items():
            if k == "hidden":
                kwargs[k] = v[:, :stop].clone()
            else:
                kwargs[k] = v[:stop]
        return DecoderState(**kwargs)

    def index_select(self, indices):
        # New state gathering the batch entries at `indices`.
        kwargs = {}
        for k, v in self.__dict__.items():
            if k == 'hidden':
                kwargs[k] = v.index_select(1, indices)
            else:
                kwargs[k] = v.index_select(0, indices)
        return DecoderState(**kwargs)

    def mask_select(self, mask):
        # New state keeping the batch entries selected by boolean `mask`.
        kwargs = {}
        for k, v in self.__dict__.items():
            if k == "hidden":
                kwargs[k] = v[:, mask]
            else:
                kwargs[k] = v[mask]
        return DecoderState(**kwargs)

    def _inflate_tensor(self, X, times):
        """
        inflate X from shape (batch_size, ...) to shape (batch_size*times, ...)
        for first decoding of beam search
        """
        # Repeat along dim 1 then flatten, so each example's copies end up
        # adjacent: [a, b] -> [a, a, b, b] for times=2.
        sizes = X.size()
        if X.dim() == 1:
            X = X.unsqueeze(1)
        repeat_times = [1] * X.dim()
        repeat_times[1] = times
        X = X.repeat(*repeat_times).view(-1, *sizes[1:])
        return X

    def inflate(self, times):
        # New state with every batch entry repeated `times` times (used to
        # expand the batch for beam search); same adjacent-copies layout as
        # _inflate_tensor, applied on dim 1 for hidden.
        kwargs = {}
        for k, v in self.__dict__.items():
            if k == "hidden":
                num_layers, batch_size, _ = v.size()
                kwargs[k] = v.repeat(1, 1, times).view(
                    num_layers, batch_size*times, -1)
            else:
                kwargs[k] = self._inflate_tensor(v, times)
        return DecoderState(**kwargs)
|
import logging
from os import path
import operator
import time
import traceback
from ast import literal_eval
from flask import request
# import mysql.connector
# from mysql.connector import errorcode
# @added 20180720 - Feature #2464: luminosity_remote_data
# Added redis and msgpack
from redis import StrictRedis
from msgpack import Unpacker
# @added 20201103 - Feature #3824: get_cluster_data
import requests
# @added 20201125 - Feature #3850: webapp - yhat_values API endoint
import numpy as np
# @added 20210328 - Feature #3994: Panorama - mirage not anomalous
import pandas as pd
import settings
from skyline_functions import (
mysql_select,
# @added 20180720 - Feature #2464: luminosity_remote_data
# nonNegativeDerivative, in_list, is_derivative_metric,
# @added 20200507 - Feature #3532: Sort all time series
# Added sort_timeseries and removed unused in_list
nonNegativeDerivative, sort_timeseries,
# @added 20201123 - Feature #3824: get_cluster_data
# Feature #2464: luminosity_remote_data
# Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
get_redis_conn_decoded,
# @added 20201125 - Feature #3850: webapp - yhat_values API endoint
get_graphite_metric,
# @added 20210328 - Feature #3994: Panorama - mirage not anomalous
filesafe_metricname, mkdir_p)
# @added 20210420 - Task #4022: Move mysql_select calls to SQLAlchemy
# from database_queries import (
# db_query_metric_id_from_base_name, db_query_latest_anomalies,
# db_query_metric_ids_from_metric_like)
# @added 20210420 - Task #4022: Move mysql_select calls to SQLAlchemy
# Task #4030: refactoring
from functions.database.queries.metric_id_from_base_name import metric_id_from_base_name
from functions.database.queries.metric_ids_from_metric_like import metric_ids_from_metric_like
from functions.database.queries.latest_anomalies import latest_anomalies as db_latest_anomalies
# @added 20210617 - Feature #4144: webapp - stale_metrics API endpoint
# Feature #4076: CUSTOM_STALE_PERIOD
# Branch #1444: thunder
from functions.thunder.stale_metrics import thunder_stale_metrics
import skyline_version
# NOTE: the module name is deliberately shadowed by its version string here;
# only the string is needed below this point.
skyline_version = skyline_version.__absolute_version__

# This module backs the Skyline Flask webapp; all log lines carry the app
# name so they can be attributed in the shared Skyline logging setup.
skyline_app = 'webapp'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)

# URL query-string arguments accepted by the Panorama request handling.
REQUEST_ARGS = ['from_date',
                'from_time',
                'from_timestamp',
                'until_date',
                'until_time',
                'until_timestamp',
                'target',
                'like_target',
                'source',
                'host',
                'algorithm',
                # @added 20161127 - Branch #922: ionosphere
                'panorama_anomaly_id',
                ]

# Converting one settings variable into a local variable, just because it is a
# long string otherwise.
try:
    ENABLE_WEBAPP_DEBUG = settings.ENABLE_WEBAPP_DEBUG
except Exception as e:
    logger.error('error :: cannot determine ENABLE_WEBAPP_DEBUG from settings - %s' % e)
    ENABLE_WEBAPP_DEBUG = False

# @added 20180720 - Feature #2464: luminosity_remote_data
# Added REDIS_CONN
# Module-level Redis connection over the configured unix socket, with or
# without auth depending on settings.
if settings.REDIS_PASSWORD:
    REDIS_CONN = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
else:
    REDIS_CONN = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
def panorama_request():
    """
    Gets the details of anomalies from the database, using the URL arguments
    that are passed in by the :obj:`request.args` to build the MySQL select
    query string and queries the database, parse the results and creates an
    array of the anomalies that matched the query and creates the
    ``panaroma.json`` file, then returns the array. The Webapp needs both the
    array and the JSONP file to serve to the browser for the client side
    ``panaroma.js``.

    :param None: determined from :obj:`request.args`
    :return: array
    :rtype: array

    .. note:: And creates ``panaroma.js`` for client side javascript
    """
    logger.info('determining request args')

    def get_ids_from_rows(thing, rows):
        # Flatten the first column of each row into a comma separated string
        # of ids suitable for a SQL IN (...) clause.
        found_ids = []
        for row in rows:
            found_id = str(row[0])
            found_ids.append(int(found_id))
        # @modified 20191014 - Task #3270: Deprecate string.replace for py3
        # Branch #3262: py3
        # ids_first = string.replace(str(found_ids), '[', '')
        # in_ids = string.replace(str(ids_first), ']', '')
        found_ids_str = str(found_ids)
        ids_first = found_ids_str.replace('[', '')
        in_ids = ids_first.replace(']', '')
        return in_ids

    try:
        request_args_len = len(request.args)
    except:
        request_args_len = False

    # No request arguments means "show the latest anomalies".
    latest_anomalies = False
    if request_args_len == 0:
        request_args_len = 'No request arguments passed'
        # return str(request_args_len)
        latest_anomalies = True

    metric = False

    # @modified 20210504 - Task #4030: refactoring
    # Task #4022: Move mysql_select calls to SQLAlchemy
    # if metric
    #     logger.info('Getting db id for %s' % metric)
    #     # @modified 20170913 - Task #2160: Test skyline with bandit
    #     # Added nosec to exclude from bandit tests
    #     query = 'select id from metrics WHERE metric=\'%s\'' % metric  # nosec
    #     try:
    #         result = mysql_select(skyline_app, query)
    #     except:
    #         logger.error('error :: failed to get id from db: %s' % traceback.format_exc())
    #         result = 'metric id not found in database'
    #     return str(result[0][0])

    search_request = True
    count_request = False

    if latest_anomalies:
        logger.info('Getting latest anomalies')
        # @modified 20191108 - Feature #3306: Record the anomaly_end_timestamp
        # Branch #3262: py3
        # query = 'select id, metric_id, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp from anomalies ORDER BY id DESC LIMIT 10'
        # @modified 20210420 - Task #4022: Move mysql_select calls to SQLAlchemy
        # query = 'select id, metric_id, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp, anomaly_end_timestamp from anomalies ORDER BY id DESC LIMIT 10'
        # try:
        #     rows = mysql_select(skyline_app, query)
        # except:
        #     logger.error('error :: failed to get anomalies from db: %s' % traceback.format_exc())
        #     rows = []
        # @added 20210420 - Task #4022: Move mysql_select calls to SQLAlchemy
        rows = []
        try:
            rows = db_latest_anomalies(skyline_app)
        except:
            logger.error('error :: failed to get anomalies from db: %s' % traceback.format_exc())
            rows = []

    if not latest_anomalies:
        logger.info('Determining search parameters')
        # @modified 20191108 - Feature #3306: Record the end_timestamp of anomalies
        # Branch #3262: py3
        # query_string = 'select id, metric_id, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp from anomalies'
        query_string = 'select id, metric_id, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp, anomaly_end_timestamp from anomalies'
        # needs_and tracks whether the next filter must be joined with AND
        # (a WHERE clause already exists) or must start the WHERE clause.
        needs_and = False

        # If we have to '' a string we cannot escape the query it seems...
        # do_not_escape = False
        if 'metric' in request.args:
            metric = request.args.get('metric', None)
            # if metric and metric != 'all':
            if isinstance(metric, str) and metric != 'all':
                # @modified 20170913 - Task #2160: Test skyline with bandit
                # Added nosec to exclude from bandit tests
                # @modified 20210420 - Task #4022: Move mysql_select calls to SQLAlchemy
                # query = "select id from metrics WHERE metric='%s'" % (metric)  # nosec
                # try:
                #     found_id = mysql_select(skyline_app, query)
                # except:
                #     logger.error('error :: failed to get app ids from db: %s' % traceback.format_exc())
                #     found_id = None
                # @added 20210420 - Task #4022: Move mysql_select calls to SQLAlchemy
                found_id = None
                if metric.startswith(settings.FULL_NAMESPACE):
                    base_name = metric.replace(settings.FULL_NAMESPACE, '', 1)
                else:
                    base_name = str(metric)
                try:
                    # found_id = db_query_metric_id_from_base_name(skyline_app, base_name)
                    found_id = metric_id_from_base_name(skyline_app, base_name)
                except:
                    logger.error('error :: failed to get metric id from db: %s' % traceback.format_exc())
                    found_id = None
                if found_id:
                    # target_id = str(found_id[0][0])
                    target_id = str(found_id)
                    if needs_and:
                        new_query_string = '%s AND metric_id=%s' % (query_string, target_id)
                    else:
                        new_query_string = '%s WHERE metric_id=%s' % (query_string, target_id)
                    query_string = new_query_string
                    needs_and = True

        # in_ids_str = None
        if 'metric_like' in request.args:
            metric_like = request.args.get('metric_like', None)
            metrics_like_str = None
            # if metric_like and metric_like != 'all':
            if isinstance(metric_like, str) and metric_like != 'all':
                # @modified 20170913 - Task #2160: Test skyline with bandit
                # Added nosec to exclude from bandit tests
                rows_returned = None
                # @modified 20210420 - Task #4022: Move mysql_select calls to SQLAlchemy
                # query = 'select id from metrics WHERE metric LIKE \'%s\'' % (str(metric_like))  # nosec
                # try:
                #     rows = mysql_select(skyline_app, query)
                # except:
                #     logger.error('error :: failed to get metric ids from db: %s' % traceback.format_exc())
                #     return False
                # rows_returned = None
                # try:
                #     rows_returned = rows[0]
                #     if ENABLE_WEBAPP_DEBUG:
                #         logger.info('debug :: rows - rows[0] - %s' % str(rows[0]))
                # except:
                #     rows_returned = False
                #     if ENABLE_WEBAPP_DEBUG:
                #         logger.info('debug :: no rows returned')
                # @added 20210420 - Task #4022: Move mysql_select calls to SQLAlchemy
                metrics_like_str = str(metric_like)
                db_metric_ids = None
                try:
                    db_metric_ids = metric_ids_from_metric_like(skyline_app, metrics_like_str)
                except Exception as e:
                    logger.error('error :: failed to get metric ids from db: %s' % e)
                    return False
                use_db_metric_ids = True
                if db_metric_ids and use_db_metric_ids:
                    rows_returned = False
                    ids = ''
                    for db_metric_id in db_metric_ids:
                        if ids == '':
                            ids = '%s' % str(db_metric_id)
                        else:
                            ids = '%s, %s' % (ids, str(db_metric_id))
                    new_query_string = '%s WHERE metric_id IN (%s)' % (query_string, str(ids))
                else:
                    # Get nothing
                    new_query_string = '%s WHERE metric_id IN (0)' % (query_string)
                    if ENABLE_WEBAPP_DEBUG:
                        logger.info('debug :: no rows returned using new_query_string - %s' % new_query_string)
                if not use_db_metric_ids:
                    if rows_returned:
                        ids = get_ids_from_rows('metric', rows)
                        new_query_string = '%s WHERE metric_id IN (%s)' % (query_string, str(ids))
                        logger.info('debug :: id is %s chars long after adding get_ids_from_rows, new_query_string: %s' % (
                            str(len(ids)), new_query_string))
                    else:
                        # Get nothing
                        new_query_string = '%s WHERE metric_id IN (0)' % (query_string)
                        if ENABLE_WEBAPP_DEBUG:
                            logger.info('debug :: no rows returned using new_query_string - %s' % new_query_string)
                query_string = new_query_string
                needs_and = True

        if 'count_by_metric' in request.args:
            count_by_metric = request.args.get('count_by_metric', None)
            if count_by_metric and count_by_metric != 'false':
                search_request = False
                count_request = True
                # query_string = 'SELECT metric_id, COUNT(*) FROM anomalies GROUP BY metric_id ORDER BY COUNT(*) DESC'
                query_string = 'SELECT metric_id, COUNT(*) FROM anomalies'
                needs_and = False

        if 'from_timestamp' in request.args:
            from_timestamp = request.args.get('from_timestamp', None)
            if from_timestamp and from_timestamp != 'all':
                # A value containing ':' is a date string that must first be
                # converted to a unix timestamp.
                if ":" in from_timestamp:
                    import time
                    import datetime
                    # @modified 20211021 - handle multiple date formats
                    try:
                        new_from_timestamp = time.mktime(datetime.datetime.strptime(from_timestamp, '%Y%m%d %H:%M').timetuple())
                    except ValueError:
                        new_from_timestamp = time.mktime(datetime.datetime.strptime(from_timestamp, '%Y-%m-%d %H:%M').timetuple())
                    except Exception as err:
                        trace = traceback.format_exc()
                        logger.error('%s' % trace)
                        fail_msg = 'error :: panorama_request :: failed to unix timestamp from from_timestamp - %s' % str(err)
                        logger.error('%s' % fail_msg)
                        raise  # to webapp to return in the UI
                    from_timestamp = str(int(new_from_timestamp))
                if needs_and:
                    new_query_string = '%s AND anomaly_timestamp >= %s' % (query_string, from_timestamp)
                    query_string = new_query_string
                    needs_and = True
                else:
                    new_query_string = '%s WHERE anomaly_timestamp >= %s' % (query_string, from_timestamp)
                    query_string = new_query_string
                    needs_and = True

        if 'until_timestamp' in request.args:
            until_timestamp = request.args.get('until_timestamp', None)
            if until_timestamp and until_timestamp != 'all':
                if ":" in until_timestamp:
                    import time
                    import datetime
                    # @modified 20211021 - handle multiple date formats
                    try:
                        new_until_timestamp = time.mktime(datetime.datetime.strptime(until_timestamp, '%Y%m%d %H:%M').timetuple())
                    except ValueError:
                        new_until_timestamp = time.mktime(datetime.datetime.strptime(until_timestamp, '%Y-%m-%d %H:%M').timetuple())
                    except Exception as err:
                        trace = traceback.format_exc()
                        logger.error('%s' % trace)
                        fail_msg = 'error :: panorama_request :: failed to unix timestamp from until_timestamp - %s' % str(err)
                        logger.error('%s' % fail_msg)
                        raise  # to webapp to return in the UI
                    until_timestamp = str(int(new_until_timestamp))
                if needs_and:
                    new_query_string = '%s AND anomaly_timestamp <= %s' % (query_string, until_timestamp)
                    query_string = new_query_string
                    needs_and = True
                else:
                    new_query_string = '%s WHERE anomaly_timestamp <= %s' % (query_string, until_timestamp)
                    query_string = new_query_string
                    needs_and = True

        if 'app' in request.args:
            app = request.args.get('app', None)
            if app and app != 'all':
                # @added 20210504 - Task #4030: refactoring
                # Task #4022: Move mysql_select calls to SQLAlchemy
                # Sanitise variable
                if isinstance(app, str):
                    for_app = str(app)
                else:
                    for_app = 'none'
                # @modified 20170913 - Task #2160: Test skyline with bandit
                # Added nosec to exclude from bandit tests
                # @modified 20210504 - Task #4030: refactoring
                # Task #4022: Move mysql_select calls to SQLAlchemy
                # query = 'select id from apps WHERE app=\'%s\'' % (str(app))  # nosec
                query = 'select id from apps WHERE app=\'%s\'' % (str(for_app))
                try:
                    found_id = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get app ids from db: %s' % traceback.format_exc())
                    found_id = None
                if found_id:
                    target_id = str(found_id[0][0])
                    if needs_and:
                        new_query_string = '%s AND app_id=%s' % (query_string, target_id)
                    else:
                        new_query_string = '%s WHERE app_id=%s' % (query_string, target_id)
                    query_string = new_query_string
                    needs_and = True

        if 'source' in request.args:
            source = request.args.get('source', None)
            if source and source != 'all':
                # @added 20210504 - Task #4030: refactoring
                # Task #4022: Move mysql_select calls to SQLAlchemy
                # Sanitise variable
                if isinstance(source, str):
                    for_source = str(source)
                else:
                    for_source = 'none'
                # @modified 20170913 - Task #2160: Test skyline with bandit
                # Added nosec to exclude from bandit tests
                # @modified 20210504 - Task #4030: refactoring
                # Task #4022: Move mysql_select calls to SQLAlchemy
                # query = 'select id from sources WHERE source=\'%s\'' % (str(source))  # nosec
                query = 'select id from sources WHERE source=\'%s\'' % (str(for_source))
                try:
                    found_id = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get source id from db: %s' % traceback.format_exc())
                    found_id = None
                if found_id:
                    target_id = str(found_id[0][0])
                    if needs_and:
                        new_query_string = '%s AND source_id=\'%s\'' % (query_string, target_id)
                    else:
                        new_query_string = '%s WHERE source_id=\'%s\'' % (query_string, target_id)
                    query_string = new_query_string
                    needs_and = True

        if 'algorithm' in request.args:
            algorithm = request.args.get('algorithm', None)
            # DISABLED as it is difficult match algorithm_id in the
            # triggered_algorithms csv list
            algorithm = 'all'
            # @modified 20210421 - Task #4030: refactoring
            # semgrep - python.lang.correctness.useless-comparison.no-strings-as-booleans
            # if algorithm and algorithm != 'all':
            use_all_for_algorithm = True
            # NOTE: because algorithm is forced to 'all' above, this branch is
            # intentionally unreachable until algorithm filtering is re-enabled.
            if use_all_for_algorithm and algorithm != 'all':
                # @added 20210504 - Task #4030: refactoring
                # Task #4022: Move mysql_select calls to SQLAlchemy
                # Sanitise variable
                if isinstance(algorithm, str):
                    for_algorithm = str(algorithm)
                else:
                    for_algorithm = 'none'
                # @modified 20170913 - Task #2160: Test skyline with bandit
                # Added nosec to exclude from bandit tests
                # @modified 20210504 - Task #4030: refactoring
                # Task #4022: Move mysql_select calls to SQLAlchemy
                # query = 'select id from algorithms WHERE algorithm LIKE \'%s\'' % (str(algorithm))  # nosec
                query = 'select id from algorithms WHERE algorithm LIKE \'%s\'' % (str(for_algorithm))
                try:
                    rows = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get algorithm ids from db: %s' % traceback.format_exc())
                    rows = []
                ids = get_ids_from_rows('algorithm', rows)
                if needs_and:
                    new_query_string = '%s AND algorithm_id IN (%s)' % (query_string, str(ids))
                else:
                    new_query_string = '%s WHERE algorithm_id IN (%s)' % (query_string, str(ids))
                query_string = new_query_string
                needs_and = True

        if 'host' in request.args:
            host = request.args.get('host', None)
            if host and host != 'all':
                # @added 20210504 - Task #4030: refactoring
                # Task #4022: Move mysql_select calls to SQLAlchemy
                # Sanitise variable
                if isinstance(host, str):
                    for_host = str(host)
                else:
                    for_host = 'none'
                # @modified 20170913 - Task #2160: Test skyline with bandit
                # Added nosec to exclude from bandit tests
                # @modified 20210504 - Task #4030: refactoring
                # Task #4022: Move mysql_select calls to SQLAlchemy
                # query = 'select id from hosts WHERE host=\'%s\'' % (str(host))  # nosec
                query = 'select id from hosts WHERE host=\'%s\'' % (str(for_host))
                try:
                    found_id = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get host id from db: %s' % traceback.format_exc())
                    found_id = None
                if found_id:
                    target_id = str(found_id[0][0])
                    if needs_and:
                        new_query_string = '%s AND host_id=\'%s\'' % (query_string, target_id)
                    else:
                        new_query_string = '%s WHERE host_id=\'%s\'' % (query_string, target_id)
                    query_string = new_query_string
                    needs_and = True

        if 'limit' in request.args:
            # @modified 20210504 - Task #4030: refactoring
            # Task #4022: Move mysql_select calls to SQLAlchemy
            # limit = request.args.get('limit', '10')
            limit_str = request.args.get('limit', '10')
            try:
                limit = int(limit_str)
            except Exception as e:
                logger.error('error :: limit parameter not an int: %s' % e)
                limit = 10
        else:
            # @modified: default as an int for consistency with the parsed
            # branch above (it is only ever interpolated via %s/str()).
            limit = 10

        if 'order' in request.args:
            # @modified 20210504 - Task #4030: refactoring
            # Task #4022: Move mysql_select calls to SQLAlchemy
            # order = request.args.get('order', 'DESC')
            order_str = request.args.get('order', 'DESC')
            # Only the two literal values are ever used in the query, so any
            # unexpected input falls back to DESC.
            if order_str == 'ASC':
                order = 'ASC'
            else:
                order = 'DESC'
        else:
            order = 'DESC'

        search_query = '%s ORDER BY id %s LIMIT %s' % (
            query_string, order, str(limit))

        if 'count_by_metric' in request.args:
            count_by_metric = request.args.get('count_by_metric', None)
            if count_by_metric and count_by_metric != 'false':
                # query_string = 'SELECT metric_id, COUNT(*) FROM anomalies GROUP BY metric_id ORDER BY COUNT(*) DESC'
                search_query = '%s GROUP BY metric_id ORDER BY COUNT(*) %s LIMIT %s' % (
                    query_string, order, limit)

        try:
            rows = mysql_select(skyline_app, search_query)
        except:
            logger.error('error :: failed to get anomalies from db: %s' % traceback.format_exc())
            rows = []

    anomalies = []
    anomalous_metrics = []

    if search_request:
        # @modified 20191014 - Task #3270: Deprecate string.replace for py3
        # Branch #3262: py3
        anomalies_json = path.abspath(path.join(path.dirname(__file__), '..', settings.ANOMALY_DUMP))
        # panorama_json = string.replace(str(anomalies_json), 'anomalies.json', 'panorama.json')
        panorama_json = anomalies_json.replace('anomalies.json', 'panorama.json')
        if ENABLE_WEBAPP_DEBUG:
            logger.info('debug :: panorama_json - %s' % str(panorama_json))

    for row in rows:
        if search_request:
            anomaly_id = str(row[0])
            metric_id = str(row[1])
        if count_request:
            metric_id = str(row[0])
            anomaly_count = str(row[1])
        # @modified 20170913 - Task #2160: Test skyline with bandit
        # Added nosec to exclude from bandit tests
        query = 'select metric from metrics WHERE id=%s' % metric_id  # nosec
        try:
            result = mysql_select(skyline_app, query)
        except:
            logger.error('error :: failed to get id from db: %s' % traceback.format_exc())
            continue
        metric = str(result[0][0])
        if search_request:
            anomalous_datapoint = str(row[2])
            # @modified: removed an accidental duplicate assignment of
            # anomaly_timestamp that assigned str(row[3]) twice.
            anomaly_timestamp = str(row[3])
            full_duration = str(row[4])
            created_timestamp = str(row[5])
            # @modified 20191108 - Feature #3306: Record the anomaly_end_timestamp
            # Branch #3262: py3
            # anomaly_data = (anomaly_id, metric, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp)
            # anomalies.append([int(anomaly_id), str(metric), anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp])
            anomaly_end_timestamp = str(row[6])
            # anomaly_data = (anomaly_id, metric, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp, anomaly_end_timestamp)
            anomalies.append([int(anomaly_id), str(metric), anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp, anomaly_end_timestamp])
            anomalous_metrics.append(str(metric))
        if count_request:
            limit_argument = anomaly_count
            if int(anomaly_count) > 100:
                limit_argument = 100
            # anomaly_data = (int(anomaly_count), metric, str(limit_argument))
            anomalies.append([int(anomaly_count), str(metric), str(limit_argument)])

    anomalies.sort(key=operator.itemgetter(0))

    if search_request:
        # Write anomalous_metrics to the static webapp directory as JSONP
        # with a handle_data() function. A single 'w' open truncates and
        # writes in one step (replaces the previous truncate-then-append
        # double open).
        with open(panorama_json, 'w') as fh:
            fh.write('handle_data(%s)' % anomalies)

    if latest_anomalies:
        return anomalies
    else:
        return search_query, anomalies
def get_list(thing):
    """
    Get a list of names for things in a database table.

    :param thing: the thing, e.g. 'algorithm'
    :type thing: str
    :return: list
    :rtype: list
    """
    # Table names follow the simple pluralisation convention, e.g.
    # 'algorithm' -> 'algorithms'.
    table = '%ss' % thing
    # @modified 20170913 - Task #2160: Test skyline with bandit
    # Added nosec to exclude from bandit tests
    query = 'select %s from %s' % (thing, table)  # nosec
    logger.info('get_list :: select %s from %s' % (thing, table))  # nosec
    try:
        results = mysql_select(skyline_app, query)
    except:
        logger.error('error :: failed to get list of %ss from %s' % (thing, table))
        results = None
    things = []
    results_array_valid = False
    try:
        test_results = results[0]
        if test_results:
            results_array_valid = True
    except:
        logger.error('error :: invalid results array for get list of %ss from %s' % (thing, table))
    if results_array_valid:
        # @modified 20210415 - Feature #4014: Ionosphere - inference
        # Stop logging results in webapp
        # logger.info('results: %s' % str(results))
        # @modified: the population loop below had been commented out along
        # with the result logging, which made this function always return an
        # empty list - restored the population, kept the logging removed.
        for result in results:
            things.append(str(result[0]))
        # logger.info('things: %s' % str(things))
        logger.info('get_list :: returned valid result: %s' % str(results_array_valid))
    return things
# @added 20180720 - Feature #2464: luminosity_remote_data
# @modified 20201203 - Feature #3860: luminosity - handle low frequency data
# Add the metric resolution
# def luminosity_remote_data(anomaly_timestamp):
def luminosity_remote_data(anomaly_timestamp, resolution):
    """
    Gets all the unique_metrics from Redis and then mgets Redis data for all
    metrics. The data is then preprocessed for the remote Skyline luminosity
    instance and only the relevant fragments of the time series are
    returned. This return is then gzipped by the Flask Webapp response to
    ensure the minimum about of bandwidth is used.

    :param anomaly_timestamp: the anomaly timestamp
    :type anomaly_timestamp: int
    :param resolution: the metric resolution in seconds, used to size the
        time series window around the anomaly
    :type resolution: int
    :return: (luminosity_data, success, message)
    :rtype: tuple
    """
    message = 'luminosity_remote_data returned'
    success = False
    luminosity_data = []
    logger.info('luminosity_remote_data :: determining unique_metrics')
    unique_metrics = []
    # If you modify the values of 61 or 600 here, it must be modified in the
    # luminosity_remote_data function in
    # skyline/luminosity/process_correlations.py as well
    # @modified 20201203 - Feature #3860: luminosity - handle low frequency data
    # Use the metric resolution
    # from_timestamp = int(anomaly_timestamp) - 600
    # until_timestamp = int(anomaly_timestamp) + 61
    from_timestamp = int(anomaly_timestamp) - (resolution * 10)
    until_timestamp = int(anomaly_timestamp) + (resolution + 1)
    try:
        # @modified 20201123 - Feature #3824: get_cluster_data
        # Feature #2464: luminosity_remote_data
        # Bug #3266: py3 Redis binary objects not strings
        # Branch #3262: py3
        # unique_metrics = list(REDIS_CONN.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))
        REDIS_CONN_DECODED = get_redis_conn_decoded(skyline_app)
        unique_metrics = list(REDIS_CONN_DECODED.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))
    except Exception as e:
        logger.error('error :: %s' % str(e))
        logger.error('error :: luminosity_remote_data :: could not determine unique_metrics from Redis set')
    if not unique_metrics:
        message = 'error :: luminosity_remote_data :: could not determine unique_metrics from Redis set'
        return luminosity_data, success, message
    logger.info('luminosity_remote_data :: %s unique_metrics' % str(len(unique_metrics)))

    # @added 20210125 - Feature #3956: luminosity - motifs
    # Improve luminosity_remote_data performance
    # Although the is_derivative_metric function is appropriate in the below
    # loop here that is not the most performant manner in which to determine if
    # the metrics are derivatives, as it needs to fire on every metric, so here
    # we just trust the Redis derivative_metrics list. This increases
    # performance on 1267 metrics from 6.442009 seconds to 1.473067 seconds
    try:
        # @modified 20211012 - Feature #4280: aet.metrics_manager.derivative_metrics Redis hash
        # derivative_metrics = list(REDIS_CONN_DECODED.smembers('derivative_metrics'))
        derivative_metrics = list(REDIS_CONN_DECODED.smembers('aet.metrics_manager.derivative_metrics'))
    except:
        derivative_metrics = []

    # assigned metrics
    assigned_min = 0
    assigned_max = len(unique_metrics)
    assigned_keys = range(assigned_min, assigned_max)

    # Compile assigned metrics
    assigned_metrics = [unique_metrics[index] for index in assigned_keys]

    # Check if this process is unnecessary
    if len(assigned_metrics) == 0:
        message = 'error :: luminosity_remote_data :: assigned_metrics length is 0'
        logger.error(message)
        return luminosity_data, success, message

    # Multi get series
    raw_assigned_failed = True
    try:
        raw_assigned = REDIS_CONN.mget(assigned_metrics)
        raw_assigned_failed = False
    except:
        logger.info(traceback.format_exc())
        message = 'error :: luminosity_remote_data :: failed to mget raw_assigned'
        logger.error(message)
        return luminosity_data, success, message
    if raw_assigned_failed:
        message = 'error :: luminosity_remote_data :: failed to mget raw_assigned'
        logger.error(message)
        return luminosity_data, success, message

    # Distill timeseries strings into lists
    for i, metric_name in enumerate(assigned_metrics):
        timeseries = []
        try:
            raw_series = raw_assigned[i]
            unpacker = Unpacker(use_list=False)
            unpacker.feed(raw_series)
            timeseries = list(unpacker)
        except:
            timeseries = []
        if not timeseries:
            continue

        # @added 20200507 - Feature #3532: Sort all time series
        # To ensure that there are no unordered timestamps in the time
        # series which are artefacts of the collector or carbon-relay, sort
        # all time series by timestamp before analysis.
        original_timeseries = timeseries
        if original_timeseries:
            timeseries = sort_timeseries(original_timeseries)
            del original_timeseries

        # Convert the time series if this is a known_derivative_metric
        # @modified 20200728 - Bug #3652: Handle multiple metrics in base_name conversion
        # base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
        # @added 20201117 - Feature #3824: get_cluster_data
        # Feature #2464: luminosity_remote_data
        # Bug #3266: py3 Redis binary objects not strings
        # Branch #3262: py3
        # Convert metric_name bytes to str
        metric_name = str(metric_name)

        # @modified 20210125 - Feature #3956: luminosity - motifs
        # Improve luminosity_remote_data performance
        # Although the is_derivative_metric function is appropriate here it is
        # not the most performant manner in which to determine if the metric
        # is a derivative in this case as it needs to fire on every metric, so
        # here we just trust the Redis derivative_metrics list. This increases
        # performance on 1267 metrics from 6.442009 seconds to 1.473067 seconds
        # if metric_name.startswith(settings.FULL_NAMESPACE):
        #     base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
        # else:
        #     base_name = metric_name
        # known_derivative_metric = is_derivative_metric('webapp', base_name)
        known_derivative_metric = False
        if metric_name in derivative_metrics:
            known_derivative_metric = True
        if known_derivative_metric:
            try:
                derivative_timeseries = nonNegativeDerivative(timeseries)
                timeseries = derivative_timeseries
            except:
                logger.error('error :: nonNegativeDerivative failed')

        # @modified 20210125 - Feature #3956: luminosity - motifs
        # Improve luminosity_remote_data performance
        # The list comprehension method halves the time to create the
        # correlate_ts from 0.0008357290644198656 to 0.0004676780663430691 seconds
        # correlate_ts = []
        # for ts, value in timeseries:
        #     if int(ts) < from_timestamp:
        #         continue
        #     if int(ts) <= anomaly_timestamp:
        #         correlate_ts.append((int(ts), value))
        #     if int(ts) > (anomaly_timestamp + until_timestamp):
        #         break
        correlate_ts = [x for x in timeseries if x[0] >= from_timestamp if x[0] <= until_timestamp]
        if not correlate_ts:
            continue
        metric_data = [str(metric_name), correlate_ts]
        luminosity_data.append(metric_data)

    # @modified: flag the happy path as successful. Previously ``success``
    # was initialised to False and never updated, so callers always received
    # success=False even when preprocessed data was returned.
    success = True
    logger.info('luminosity_remote_data :: %s valid metric time series data preprocessed for the remote request' % str(len(luminosity_data)))
    return luminosity_data, success, message
# @added 20200908 - Feature #3740: webapp - anomaly API endpoint
def panorama_anomaly_details(anomaly_id):
    """
    Gets the details for an anomaly from the database.

    Returns a list of
    [id, metric, anomalous_datapoint, anomaly_timestamp, full_duration,
    created_timestamp, anomaly_end_timestamp], None when the anomaly id is
    unknown, or False on a database error.
    """
    logger.info('panorama_anomaly_details - getting details for anomaly id %s' % str(anomaly_id))

    # Resolve the metric id the anomaly belongs to.
    metric_id = 0
    # Added nosec to exclude from bandit tests
    query = 'select metric_id from anomalies WHERE id=\'%s\'' % str(anomaly_id)  # nosec
    try:
        db_result = mysql_select(skyline_app, query)
        metric_id = int(db_result[0][0])
    except:
        logger.error(traceback.format_exc())
        logger.error('error :: panorama_anomaly_details - failed to get metric_id from db')
        return False

    # Resolve the metric name from the metric id.
    if metric_id > 0:
        logger.info('panorama_anomaly_details - getting metric for metric_id - %s' % str(metric_id))
        # Added nosec to exclude from bandit tests
        query = 'select metric from metrics WHERE id=\'%s\'' % str(metric_id)  # nosec
        try:
            db_result = mysql_select(skyline_app, query)
            metric = str(db_result[0][0])
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: panorama_anomaly_details - failed to get metric from db')
            return False

    # Fetch the full anomaly record.
    query = 'select id, metric_id, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp, anomaly_end_timestamp from anomalies WHERE id=\'%s\'' % str(anomaly_id)  # nosec
    logger.info('panorama_anomaly_details - running query - %s' % str(query))
    try:
        anomaly_rows = mysql_select(skyline_app, query)
    except:
        logger.error(traceback.format_exc())
        logger.error('error :: panorama_anomaly_details - failed to get anomaly details from db')
        return False

    # Only the first returned row is of interest.
    anomaly_data = None
    first_row = next(iter(anomaly_rows), None)
    if first_row is not None:
        anomalous_datapoint = float(first_row[2])
        anomaly_timestamp = int(first_row[3])
        full_duration = int(first_row[4])
        created_timestamp = str(first_row[5])
        try:
            anomaly_end_timestamp = int(first_row[6])
        except:
            # Older records may not have an end timestamp recorded.
            anomaly_end_timestamp = None
        anomaly_data = [int(anomaly_id), str(metric), anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp, anomaly_end_timestamp]
    return anomaly_data
# @added 20201103 - Feature #3824: get_cluster_data
# @modified 20201127 - Feature #3824: get_cluster_data
# Feature #3820: HORIZON_SHARDS
# Allow to query only a single host in the cluster so that just the response
# can from a single host in the cluster can be evaluated
def get_cluster_data(api_endpoint, data_required, only_host='all', endpoint_params=None):
    """
    Gets data from the /api of REMOTE_SKYLINE_INSTANCES. This allows the user
    to query a single Skyline webapp node in a cluster and the Skyline instance
    will respond with the concatenated responses of all the
    REMOTE_SKYLINE_INSTANCES in one a single response.

    :param api_endpoint: the api endpoint to request data from the remote
        Skyline instances
    :param data_required: the element from the api json response that is
        required
    :param only_host: The remote Skyline host to query, if not passed all are
        queried.
    :param endpoint_params: A dictionary of any additional parameters that may
        be required.  NOTE(review): currently unused in the body, retained for
        interface compatibility.
    :type api_endpoint: str
    :type data_required: str
    :type only_host: str
    :type endpoint_params: dict
    :return: list
    :rtype: list
    """
    # @modified - do not use a mutable default argument ({}), it is shared
    # between calls; default to None and create a fresh dict per call
    if endpoint_params is None:
        endpoint_params = {}
    # Reuse the Graphite timeouts as sensible defaults for remote requests
    try:
        connect_timeout = int(settings.GRAPHITE_CONNECT_TIMEOUT)
        read_timeout = int(settings.GRAPHITE_READ_TIMEOUT)
    except:
        connect_timeout = 5
        read_timeout = 10
    use_timeout = (int(connect_timeout), int(read_timeout))
    data = []
    if only_host != 'all':
        # @modified - corrected the misleading log message, when only_host is
        # set only that single host is queried, not all remote hosts
        logger.info('get_cluster_data :: only querying remote host %s as only_host is set' % (
            str(only_host)))
    for item in settings.REMOTE_SKYLINE_INSTANCES:
        r = None
        user = None
        password = None
        use_auth = False
        # @added 20201127 - Feature #3824: get_cluster_data
        # Feature #3820: HORIZON_SHARDS
        # Allow querying only a single host in the cluster so that just the
        # response from a single host in the cluster can be evaluated
        if only_host != 'all':
            if only_host != str(item[0]):
                logger.info('get_cluster_data :: not querying %s as only_host set to %s' % (
                    str(item[0]), str(only_host)))
                continue
            else:
                logger.info('get_cluster_data :: querying %s as only_host set to %s' % (
                    str(item[0]), str(only_host)))
        # item is [url, user, password]; fall back to unauthenticated requests
        # when credentials are not declared for the instance
        try:
            user = str(item[1])
            password = str(item[2])
            use_auth = True
        except:
            user = None
            password = None
        logger.info('get_cluster_data :: querying %s for %s on %s' % (
            str(item[0]), str(data_required), str(api_endpoint)))
        try:
            url = '%s/api?%s' % (str(item[0]), api_endpoint)
            if use_auth:
                r = requests.get(url, timeout=use_timeout, auth=(user, password))
            else:
                r = requests.get(url, timeout=use_timeout)
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: get_cluster_data :: failed to %s from %s' % (
                api_endpoint, str(item)))
        if r:
            if r.status_code != 200:
                logger.error('error :: get_cluster_data :: %s from %s responded with status code %s and reason %s' % (
                    api_endpoint, str(item), str(r.status_code), str(r.reason)))
            js = None
            try:
                js = r.json()
            except:
                logger.error(traceback.format_exc())
                logger.error('error :: get_cluster_data :: failed to get json from the response from %s on %s' % (
                    api_endpoint, str(item)))
            remote_data = []
            if js:
                logger.info('get_cluster_data :: got response for %s from %s' % (
                    str(data_required), str(item[0])))
                try:
                    remote_data = js['data'][data_required]
                except:
                    logger.error(traceback.format_exc())
                    logger.error('error :: get_cluster_data :: failed to build remote_data from %s on %s' % (
                        str(data_required), str(item)))
            if remote_data:
                # @modified 20210617 - Feature #4144: webapp - stale_metrics API endpoint
                # Handle list and dict items - lists are concatenated, dicts
                # are appended as single elements
                if isinstance(remote_data, list):
                    logger.info('get_cluster_data :: got %s %s from %s' % (
                        str(len(remote_data)), str(data_required), str(item[0])))
                    data = data + remote_data
                if isinstance(remote_data, dict):
                    logger.info('get_cluster_data :: got %s %s from %s' % (
                        str(len(remote_data)), str(data_required), str(item[0])))
                    data.append(remote_data)
    return data
# @added 20201125 - Feature #3850: webapp - yhat_values API endpoint
def get_yhat_values(
        metric, from_timestamp, until_timestamp, include_value, include_mean,
        include_yhat_real_lower, include_anomalous_periods):
    """
    Calculate yhat (expected bounds) values for a metric timeseries using a
    running mean +/- 3-sigma, extended around recent breaches and known
    anomalies, and cache the results in Redis.

    :param metric: the metric base_name
    :param from_timestamp: the start of the period
    :param until_timestamp: the end of the period
    :param include_value: include the original datapoint value in each entry
    :param include_mean: include the running mean in each entry
    :param include_yhat_real_lower: include yhat_real_lower (the lower bound
        clamped to 0 when the timeseries has no negative values)
    :param include_anomalous_periods: include the anomalous_period flag
    :type metric: str
    :type from_timestamp: int
    :type until_timestamp: int
    :type include_value: boolean
    :type include_mean: boolean
    :type include_yhat_real_lower: boolean
    :type include_anomalous_periods: boolean
    :return: (yhat_dict, anomalous_periods_dict), or None on error
    :rtype: tuple
    """
    timeseries = []
    try:
        logger.info('get_yhat_values :: for %s from %s until %s' % (
            metric, str(from_timestamp), str(until_timestamp)))
        timeseries = get_graphite_metric('webapp', metric, from_timestamp, until_timestamp, 'list', 'object')
    except:
        logger.error(traceback.format_exc())
        logger.error('error :: get_yhat_values :: failed to get timeseries data for %s' % (
            metric))
        return None
    yhat_dict = {}
    logger.info('get_yhat_values :: %s values in timeseries for %s to calculate yhat values from' % (
        str(len(timeseries)), metric))
    # @added 20210126 - Task #3958: Handle secondary algorithms in yhat_values
    anomalous_periods_dict = {}
    if timeseries:
        # Look up the metric id so known anomalies in the period can be
        # fetched from the database
        metric_id = 0
        if metric:
            logger.info('get_yhat_values :: getting db id for metric - %s' % metric)
            query = 'select id from metrics WHERE metric=\'%s\'' % metric  # nosec
            try:
                result = mysql_select(skyline_app, query)
                metric_id = int(result[0][0])
            except:
                logger.error('error :: get_yhat_values :: failed to get id from db: %s' % traceback.format_exc())
        anomalies_at = []
        if metric_id:
            logger.info('get_yhat_values :: getting latest anomalies')
            query = 'select anomaly_timestamp, anomalous_datapoint, anomaly_end_timestamp from anomalies WHERE metric_id=%s AND anomaly_timestamp >= %s AND anomaly_timestamp <= %s' % (
                str(metric_id), str(from_timestamp), str(until_timestamp))  # nosec
            try:
                rows = mysql_select(skyline_app, query)
                for row in rows:
                    a_timestamp = int(row[0])
                    a_value = float(row[1])
                    try:
                        a_end_timestamp = int(row[2])
                    except:
                        # anomaly_end_timestamp can be NULL in the db
                        a_end_timestamp = 0
                    anomalies_at.append([a_timestamp, a_value, a_end_timestamp])
            except:
                logger.error('error :: get_yhat_values :: failed to get anomalies from db: %s' % traceback.format_exc())
                rows = []
        # Build the range of timestamps each timeseries entry covers so that
        # anomaly timestamps can be matched to a timeseries index
        timeseries_ranges = []
        last_timestamp = None
        for index, item in enumerate(timeseries):
            if last_timestamp:
                t_range = list(range(last_timestamp, int(item[0])))
                timeseries_ranges.append([index, t_range, item])
            last_timestamp = int(item[0])
        # Handle the final entry, relying on index/item leaking from the loop
        t_range = list(range(last_timestamp, (int(item[0]) + 1)))
        timeseries_ranges.append([index, t_range, item])
        anomalies_index = []
        for index, time_range, item in timeseries_ranges:
            for a_timestamp, a_value, a_end_timestamp in anomalies_at:
                if a_timestamp in time_range:
                    anomalies_index.append([index, item])
        # Expand each anomaly index into a window of surrounding indices
        # (3 before, 4 after) that is treated as an anomalous period
        anomalous_period_indices = []
        anomalies_indices = [item[0] for item in anomalies_index]
        for index, item in enumerate(timeseries):
            for idx in anomalies_indices:
                anomaly_index_range = list(range((idx - 3), (idx + 5)))
                if index in anomaly_index_range:
                    for i in anomaly_index_range:
                        anomalous_period_indices.append(i)
        anomaly_timestamps_indices = []
        anomalies = []
        for item in anomalies_index:
            anomaly_timestamps_indices.append(item[0])
            anomalies.append(item[1])
    top = []
    bottom = []
    left = []
    right = []
    if timeseries:
        try:
            array_amin = np.amin([item[1] for item in timeseries])
            values = []
            # After a breach the upper (or lower) bound is extended for
            # breach_for datapoints to dampen repeated alerting
            last_breach = 0
            breach_for = 10
            last_breach_vector = 'positive'
            top = []
            bottom = []
            left = []
            right = []
            # @modified - hoisted the loop-invariant anomalous period index
            # set out of the loop, it was sorted/set per datapoint
            anomalous_period_indices_set = set(anomalous_period_indices)
            for index, item in enumerate(timeseries):
                ts = item[0]
                value = item[1]
                values.append(value)
                # Running mean and 3-sigma over all values seen so far
                va = np.array(values)
                va_mean = va.mean()
                va_std_3 = 3 * va.std()
                # @added 20210126 - Task #3958: Handle secondary algorithms in yhat_values
                anomalous_period = 0
                three_sigma_lower = va_mean - va_std_3
                three_sigma_upper = va_mean + va_std_3
                use_extended = False
                drop_expected_range = False
                if index not in anomaly_timestamps_indices:
                    use_extended = True
                else:
                    drop_expected_range = True
                for anomaly_index in anomaly_timestamps_indices:
                    if index > anomaly_index:
                        if index < (anomaly_index + breach_for):
                            use_extended = False
                            anomalous_period = 1
                            break
                extended_lower = three_sigma_lower
                extended_upper = three_sigma_upper
                if use_extended:
                    if item[1] > three_sigma_upper:
                        # Breach above - extend the upper bound 5% above value
                        extended_lower = three_sigma_lower
                        extended_upper = (item[1] + ((item[1] / 100) * 5))
                        last_breach = index
                        last_breach_vector = 'positive'
                    elif item[1] < three_sigma_lower:
                        # Breach below - extend the lower bound 5% below value
                        extended_lower = (item[1] - ((item[1] / 100) * 5))
                        extended_upper = three_sigma_upper
                        last_breach = index
                        last_breach_vector = 'negative'
                    elif index < (last_breach + breach_for) and index > last_breach:
                        # Within breach_for datapoints of the last breach,
                        # keep the bound extended in the breach direction
                        if last_breach_vector == 'positive':
                            extended_value = (item[1] + ((item[1] / 100) * 5))
                            three_sigma_value = three_sigma_upper
                            if three_sigma_value > extended_value:
                                extended_value = (three_sigma_value + ((three_sigma_value / 100) * 5))
                            extended_lower = three_sigma_lower
                            extended_upper = extended_value
                        else:
                            extended_lower = (item[1] - ((item[1] / 100) * 5))
                            extended_upper = three_sigma_upper
                        # NOTE(review): drop_expected_range is only True when
                        # use_extended is False, so this branch looks
                        # unreachable here - preserved as found, confirm
                        if drop_expected_range:
                            use_extended = False
                            if last_breach_vector == 'positive':
                                extended_lower = three_sigma_lower - (three_sigma_upper * 0.1)
                                extended_upper = item[1] - (item[1] * 0.1)
                            if last_breach_vector == 'negative':
                                extended_lower = three_sigma_lower - (three_sigma_lower * 0.1)
                                extended_upper = item[1] + (item[1] * 0.1)
                    else:
                        extended_lower = three_sigma_lower
                        extended_upper = three_sigma_upper
                        # NOTE(review): same unreachable-looking guard as
                        # above - preserved as found, confirm
                        if drop_expected_range:
                            use_extended = False
                            if last_breach_vector == 'positive':
                                extended_lower = three_sigma_lower - (three_sigma_upper * 0.1)
                                extended_upper = item[1] - (item[1] * 0.1)
                            if last_breach_vector == 'negative':
                                extended_lower = three_sigma_lower - (three_sigma_lower * 0.1)
                                extended_upper = item[1] + (item[1] * 0.1)
                else:
                    extended_lower = three_sigma_lower
                    extended_upper = three_sigma_upper
                    if drop_expected_range:
                        use_extended = False
                        if last_breach_vector == 'positive':
                            extended_lower = three_sigma_lower - (three_sigma_upper * 0.1)
                            extended_upper = item[1] - (item[1] * 0.1)
                        if last_breach_vector == 'negative':
                            extended_lower = three_sigma_lower - (three_sigma_lower * 0.1)
                            extended_upper = item[1] + (item[1] * 0.1)
                lower = extended_lower
                upper = extended_upper
                # Build the anomalous period rectangle coordinates and skip
                # the anomalous datapoints themselves
                if index in anomalous_period_indices_set:
                    if index in anomalies_indices:
                        continue
                    for idx in anomaly_timestamps_indices:
                        if (index + 3) == idx:
                            a_top = extended_upper + (extended_upper * 0.1)
                            top.append(a_top)
                            a_bottom = extended_lower - (extended_lower * 0.1)
                            bottom.append(a_bottom)
                            a_left = item[0]
                            left.append(a_left)
                        if (index - 4) == idx:
                            a_right = item[0]
                            right.append(a_right)
                # @modified 20201126 - Feature #3850: webapp - yhat_values API endpoint
                # Change dict key to int not float
                int_ts = int(ts)
                yhat_dict[int_ts] = {}
                if include_value:
                    yhat_dict[int_ts]['value'] = value
                # @modified - removed a duplicated include_mean block
                if include_mean:
                    yhat_dict[int_ts]['mean'] = va_mean
                # @modified 20210201 - Task #3958: Handle secondary algorithms in yhat_values
                yhat_lower = lower
                yhat_upper = upper
                if include_yhat_real_lower:
                    # @modified 20201202 - Feature #3850: webapp - yhat_values API endpoint
                    # Set the yhat_real_lower correctly - clamp to 0 only when
                    # the timeseries has no (meaningfully) negative values
                    if yhat_lower < 0 and array_amin > -0.0000000001:
                        yhat_dict[int_ts]['yhat_real_lower'] = 0
                    else:
                        yhat_dict[int_ts]['yhat_real_lower'] = yhat_lower
                yhat_dict[int_ts]['yhat_lower'] = yhat_lower
                # @modified - removed the dead (va_mean + va_std_3) assignment
                # of yhat_upper, it was immediately overwritten with upper
                yhat_dict[int_ts]['yhat_upper'] = upper
                # @added 20210201 - Task #3958: Handle secondary algorithms in yhat_values
                if use_extended:
                    if yhat_lower != three_sigma_lower:
                        yhat_dict[int_ts]['3sigma_lower'] = three_sigma_lower
                    if yhat_upper != three_sigma_upper:
                        yhat_dict[int_ts]['3sigma_upper'] = three_sigma_upper
                if include_anomalous_periods:
                    yhat_dict[int_ts]['anomalous_period'] = anomalous_period
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: get_yhat_values :: failed create yhat_dict for %s' % (
                metric))
            return None
    logger.info('get_yhat_values :: calculated yhat values for %s data points' % str(len(yhat_dict)))
    if yhat_dict:
        yhat_dict_cache_key = 'webapp.%s.%s.%s.%s.%s.%s' % (
            metric, str(from_timestamp), str(until_timestamp),
            str(include_value), str(include_mean),
            str(include_yhat_real_lower))
        logger.info('get_yhat_values :: saving yhat_dict to Redis key - %s' % yhat_dict_cache_key)
        try:
            REDIS_CONN.setex(yhat_dict_cache_key, 14400, str(yhat_dict))
            logger.info('get_yhat_values :: created Redis key - %s with 14400 TTL' % yhat_dict_cache_key)
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: get_yhat_values :: failed to setex Redis key - %s' % yhat_dict_cache_key)
    # @added 20210126 - Task #3958: Handle secondary algorithms in yhat_values
    # Add rectangle coordinates that describe anomalous periods
    anomalous_periods_dict['rectangles'] = {}
    anomalous_periods_dict['rectangles']['top'] = top
    anomalous_periods_dict['rectangles']['bottom'] = bottom
    anomalous_periods_dict['rectangles']['left'] = left
    anomalous_periods_dict['rectangles']['right'] = right
    if anomalous_periods_dict:
        yhat_anomalous_periods_dict_cache_key = 'webapp.%s.%s.%s.%s.%s.%s.anomalous_periods' % (
            metric, str(from_timestamp), str(until_timestamp),
            str(include_value), str(include_mean),
            str(include_yhat_real_lower))
        logger.info('get_yhat_values :: saving anomalous_periods_dict to Redis key - %s' % yhat_anomalous_periods_dict_cache_key)
        try:
            # @modified - save the anomalous_periods_dict data, previously the
            # cache key name string itself was saved as the value
            REDIS_CONN.setex(yhat_anomalous_periods_dict_cache_key, 14400, str(anomalous_periods_dict))
            logger.info('get_yhat_values :: created Redis key - %s with 14400 TTL' % yhat_anomalous_periods_dict_cache_key)
        except:
            logger.error(traceback.format_exc())
            # @modified - log the correct key on failure
            logger.error('error :: get_yhat_values :: failed to setex Redis key - %s' % yhat_anomalous_periods_dict_cache_key)
    # @modified 20210201 - Task #3958: Handle secondary algorithms in yhat_values
    # return yhat_dict
    return yhat_dict, anomalous_periods_dict
# @added 20210326 - Feature #3994: Panorama - mirage not anomalous
def get_mirage_not_anomalous_metrics(
        metric=None, from_timestamp=None, until_timestamp=None,
        anomalies=False):
    """
    Determine mirage not anomalous metrics from mirage.panorama.not_anomalous_metrics
    and ionosphere.panorama.not_anomalous_metrics

    :param metric: base_name
    :param from_timestamp: the from_timestamp
    :param until_timestamp: the until_timestamp
    :param anomalies: whether to report anomalies as well
    :type metric: str
    :type from_timestamp: int
    :type until_timestamp: int
    :type anomalies: boolean
    :return: (not_anomalous_dict, anomalies_dict)
    :rtype: tuple
    """
    import datetime
    current_date = datetime.datetime.now().date()
    # current_date is ISO formatted so current_date_str is always
    # '%Y-%m-%d 00:00'
    current_date_str = '%s 00:00' % str(current_date)
    base_name = None
    if 'metric' in request.args:
        base_name = request.args.get('metric', None)
        if base_name == 'all':
            base_name = None
    if metric:
        base_name = metric
    if not from_timestamp and 'from_timestamp' in request.args:
        from_timestamp = request.args.get('from_timestamp', None)
    if from_timestamp:
        # Normalise to str so the ':' checks below cannot TypeError on an
        # int parameter
        from_timestamp = str(from_timestamp)
    if from_timestamp == 'today':
        # @modified 20211021 - handle multiple date formats
        try:
            new_from_timestamp = time.mktime(datetime.datetime.strptime(current_date_str, '%Y-%m-%d %H:%M').timetuple())
        except ValueError:
            # @modified - the fallback previously retried the identical
            # format, use the old %Y%m%d format as elsewhere
            new_from_timestamp = time.mktime(datetime.datetime.strptime(current_date_str, '%Y%m%d %H:%M').timetuple())
        except Exception as err:
            trace = traceback.format_exc()
            logger.error('%s' % trace)
            fail_msg = 'error :: panorama_request :: failed to unix timestamp from current_date_str - %s' % str(err)
            logger.error('%s' % fail_msg)
            raise  # to webapp to return in the UI
        from_timestamp = str(int(new_from_timestamp))
    if from_timestamp and from_timestamp != 'today':
        if ":" in from_timestamp:
            # @modified 20211021 - handle multiple date formats
            try:
                new_from_timestamp = time.mktime(datetime.datetime.strptime(from_timestamp, '%Y-%m-%d %H:%M').timetuple())
            except ValueError:
                # Handle old format
                new_from_timestamp = time.mktime(datetime.datetime.strptime(from_timestamp, '%Y%m%d %H:%M').timetuple())
            except Exception as err:
                trace = traceback.format_exc()
                logger.error('%s' % trace)
                fail_msg = 'error :: panorama_request :: failed to unix timestamp from from_timestamp - %s' % str(err)
                logger.error('%s' % fail_msg)
                raise  # to webapp to return in the UI
            from_timestamp = str(int(new_from_timestamp))
        else:
            # Not a date string, default to the start of today
            # @modified 20211021 - handle multiple date formats
            try:
                new_from_timestamp = time.mktime(datetime.datetime.strptime(current_date_str, '%Y-%m-%d %H:%M').timetuple())
            except ValueError:
                new_from_timestamp = time.mktime(datetime.datetime.strptime(current_date_str, '%Y%m%d %H:%M').timetuple())
            except Exception as err:
                trace = traceback.format_exc()
                logger.error('%s' % trace)
                fail_msg = 'error :: panorama_request :: failed to unix timestamp from current_date_str - %s' % str(err)
                logger.error('%s' % fail_msg)
                raise  # to webapp to return in the UI
            from_timestamp = str(int(new_from_timestamp))
    # @modified - previously until_timestamp was unconditionally overwritten
    # with now before this point, which made the documented parameter and the
    # request argument below dead.  Only default it when it was not provided.
    if not until_timestamp and 'until_timestamp' in request.args:
        until_timestamp = request.args.get('until_timestamp', None)
    if until_timestamp:
        # Normalise to str so the ':' checks below cannot TypeError on an
        # int parameter
        until_timestamp = str(until_timestamp)
    if until_timestamp == 'all':
        until_timestamp = str(int(time.time()))
    if until_timestamp and until_timestamp != 'all':
        if ":" in until_timestamp:
            # @modified 20211021 - handle multiple date formats
            try:
                new_until_timestamp = time.mktime(datetime.datetime.strptime(until_timestamp, '%Y-%m-%d %H:%M').timetuple())
            except ValueError:
                new_until_timestamp = time.mktime(datetime.datetime.strptime(until_timestamp, '%Y%m%d %H:%M').timetuple())
            except Exception as err:
                trace = traceback.format_exc()
                logger.error('%s' % trace)
                fail_msg = 'error :: panorama_request :: failed to unix timestamp from until_timestamp - %s' % str(err)
                logger.error('%s' % fail_msg)
                raise  # to webapp to return in the UI
            until_timestamp = str(int(new_until_timestamp))
        else:
            until_timestamp = str(int(time.time()))
    if not until_timestamp:
        until_timestamp = str(int(time.time()))
    get_anomalies = False
    # @modified - honour the anomalies parameter, previously only the request
    # argument was evaluated and the parameter was ignored
    if anomalies:
        get_anomalies = True
    if 'anomalies' in request.args:
        anomalies_str = request.args.get('anomalies', 'false')
        if anomalies_str == 'true':
            get_anomalies = True
            logger.info(
                'get_mirage_not_anomalous_metrics - also determining anomalies for %s' % (
                    str(base_name)))
    logger.info(
        'get_mirage_not_anomalous_metrics - base_name: %s, from_timestamp: %s, until_timestamp: %s' % (
            str(base_name), str(from_timestamp), str(until_timestamp)))
    redis_hash = 'mirage.panorama.not_anomalous_metrics'
    mirage_panorama_not_anomalous = {}
    try:
        REDIS_CONN_DECODED = get_redis_conn_decoded(skyline_app)
        mirage_panorama_not_anomalous = REDIS_CONN_DECODED.hgetall(redis_hash)
        logger.info('get_mirage_not_anomalous_metrics :: %s entries to check in the %s Redis hash key' % (
            str(len(mirage_panorama_not_anomalous)), redis_hash))
    except:
        logger.error(traceback.format_exc())
        logger.error('error :: get_mirage_not_anomalous_metrics :: failed to get Redis hash key %s' % redis_hash)
        mirage_panorama_not_anomalous = {}
    all_timestamp_float_strings = []
    if mirage_panorama_not_anomalous:
        all_timestamp_float_strings = list(mirage_panorama_not_anomalous.keys())
    # Filter the hash entries to the requested period
    timestamp_floats = []
    if all_timestamp_float_strings:
        for timestamp_float_string in all_timestamp_float_strings:
            if int(float(timestamp_float_string)) >= int(from_timestamp):
                if int(float(timestamp_float_string)) <= int(until_timestamp):
                    timestamp_floats.append(timestamp_float_string)
    not_anomalous_dict = {}
    not_anomalous_count = 0
    for timestamp_float_string in timestamp_floats:
        try:
            timestamp_float_dict = literal_eval(mirage_panorama_not_anomalous[timestamp_float_string])
            for i_metric in list(timestamp_float_dict.keys()):
                if base_name:
                    if base_name != i_metric:
                        continue
                try:
                    metric_dict = not_anomalous_dict[i_metric]
                except:
                    # First time this metric is seen, initialise its entry
                    metric_dict = {}
                    not_anomalous_dict[i_metric] = {}
                    not_anomalous_dict[i_metric]['from'] = int(from_timestamp)
                    not_anomalous_dict[i_metric]['until'] = int(until_timestamp)
                    not_anomalous_dict[i_metric]['timestamps'] = {}
                metric_timestamp = timestamp_float_dict[i_metric]['timestamp']
                try:
                    metric_timestamp_dict = not_anomalous_dict[i_metric]['timestamps'][metric_timestamp]
                except:
                    not_anomalous_dict[i_metric]['timestamps'][metric_timestamp] = {}
                    metric_timestamp_dict = {}
                if not metric_timestamp_dict:
                    not_anomalous_dict[i_metric]['timestamps'][metric_timestamp]['value'] = timestamp_float_dict[i_metric]['value']
                    not_anomalous_dict[i_metric]['timestamps'][metric_timestamp]['hours_to_resolve'] = timestamp_float_dict[i_metric]['hours_to_resolve']
                    not_anomalous_count += 1
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: get_mirage_not_anomalous_metrics :: failed iterate mirage_panorama_not_anomalous entry')
    logger.info(
        'get_mirage_not_anomalous_metrics - not_anomalous_count: %s, for base_name: %s' % (
            str(not_anomalous_count), str(base_name)))
    # @added 20210429 - Feature #3994: Panorama - mirage not anomalous
    # A hash is added to the ionosphere.panorama.not_anomalous_metrics for
    # every metric that is found to be not anomalous.
    redis_hash = 'ionosphere.panorama.not_anomalous_metrics'
    ionosphere_panorama_not_anomalous = {}
    try:
        REDIS_CONN_DECODED = get_redis_conn_decoded(skyline_app)
        ionosphere_panorama_not_anomalous = REDIS_CONN_DECODED.hgetall(redis_hash)
        logger.info('get_mirage_not_anomalous_metrics :: %s entries to check in the %s Redis hash key' % (
            str(len(ionosphere_panorama_not_anomalous)), redis_hash))
    except:
        logger.error(traceback.format_exc())
        logger.error('error :: get_mirage_not_anomalous_metrics :: failed to get Redis hash key %s' % redis_hash)
        ionosphere_panorama_not_anomalous = {}
    ionosphere_all_timestamp_float_strings = []
    if ionosphere_panorama_not_anomalous:
        ionosphere_all_timestamp_float_strings = list(ionosphere_panorama_not_anomalous.keys())
    ionosphere_timestamp_floats = []
    # @modified - guard on the ionosphere list, previously this was guarded on
    # the mirage all_timestamp_float_strings list so ionosphere entries were
    # skipped whenever the mirage hash was empty
    if ionosphere_all_timestamp_float_strings:
        for timestamp_float_string in ionosphere_all_timestamp_float_strings:
            if int(float(timestamp_float_string)) >= int(from_timestamp):
                if int(float(timestamp_float_string)) <= int(until_timestamp):
                    ionosphere_timestamp_floats.append(timestamp_float_string)
    for timestamp_float_string in ionosphere_timestamp_floats:
        try:
            timestamp_float_dict = literal_eval(ionosphere_panorama_not_anomalous[timestamp_float_string])
            for i_metric in list(timestamp_float_dict.keys()):
                if base_name:
                    if base_name != i_metric:
                        continue
                try:
                    metric_dict = not_anomalous_dict[i_metric]
                except:
                    # First time this metric is seen, initialise its entry
                    metric_dict = {}
                    not_anomalous_dict[i_metric] = {}
                    not_anomalous_dict[i_metric]['from'] = int(from_timestamp)
                    not_anomalous_dict[i_metric]['until'] = int(until_timestamp)
                    not_anomalous_dict[i_metric]['timestamps'] = {}
                    del metric_dict
                metric_timestamp = timestamp_float_dict[i_metric]['timestamp']
                try:
                    metric_timestamp_dict = not_anomalous_dict[i_metric]['timestamps'][metric_timestamp]
                except:
                    not_anomalous_dict[i_metric]['timestamps'][metric_timestamp] = {}
                    metric_timestamp_dict = {}
                if not metric_timestamp_dict:
                    not_anomalous_dict[i_metric]['timestamps'][metric_timestamp]['value'] = timestamp_float_dict[i_metric]['value']
                    not_anomalous_dict[i_metric]['timestamps'][metric_timestamp]['hours_to_resolve'] = timestamp_float_dict[i_metric]['hours_to_resolve']
                    not_anomalous_count += 1
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: get_mirage_not_anomalous_metrics :: failed iterate ionosphere_panorama_not_anomalous entry')
    logger.info(
        'get_mirage_not_anomalous_metrics - not_anomalous_count: %s (with ionosphere), for base_name: %s' % (
            str(not_anomalous_count), str(base_name)))
    anomalies_dict = {}
    if get_anomalies:
        for i_metric in list(not_anomalous_dict.keys()):
            metric_id = None
            query = 'SELECT id FROM metrics WHERE metric=\'%s\'' % i_metric  # nosec
            try:
                results = mysql_select(skyline_app, query)
                for item in results:
                    metric_id = int(item[0])
                    break
            except:
                logger.error(traceback.format_exc())
                logger.error('error :: get_mirage_not_anomalous_metrics :: querying MySQL - %s' % query)
            query_start_str = 'SELECT anomaly_timestamp,anomalous_datapoint,anomaly_end_timestamp,full_duration FROM anomalies'
            if metric_id:
                query = '%s WHERE metric_id=%s AND anomaly_timestamp > %s AND anomaly_timestamp < %s' % (
                    query_start_str, metric_id, str(from_timestamp), str(until_timestamp))
            else:
                query = '%s WHERE anomaly_timestamp > %s AND anomaly_timestamp < %s' % (
                    query_start_str, str(from_timestamp), str(until_timestamp))
            anomalies = []
            try:
                results = mysql_select(skyline_app, query)
                for item in results:
                    anomalies.append([i_metric, int(item[0]), float(item[1]), float(item[2]), round(int(item[3]) / 3600)])
            except:
                logger.error(traceback.format_exc())
                logger.error('error :: get_mirage_not_anomalous_metrics :: querying MySQL - %s' % query)
            anomalies_dict[i_metric] = {}
            anomalies_dict[i_metric]['from'] = int(from_timestamp)
            anomalies_dict[i_metric]['until'] = int(until_timestamp)
            anomalies_dict[i_metric]['timestamps'] = {}
            if anomalies:
                for a_metric, timestamp, value, anomaly_end_timestamp, hours_to_resolve in anomalies:
                    anomalies_dict[i_metric]['timestamps'][timestamp] = {}
                    anomalies_dict[i_metric]['timestamps'][timestamp]['value'] = value
                    anomalies_dict[i_metric]['timestamps'][timestamp]['hours_to_resolve'] = hours_to_resolve
                    anomalies_dict[i_metric]['timestamps'][timestamp]['end_timestamp'] = anomaly_end_timestamp
    # @added 20210328 - Feature #3994: Panorama - mirage not anomalous
    # Save key to use in not_anomalous_metric
    not_anomalous_dict_key = 'panorama.not_anomalous_dict.%s.%s' % (
        str(from_timestamp), str(until_timestamp))
    not_anomalous_dict_key_ttl = 600
    if base_name:
        not_anomalous_dict_key = 'panorama.not_anomalous_dict.%s.%s.%s' % (
            str(from_timestamp), str(until_timestamp), base_name)
        not_anomalous_dict_key_ttl = 600
    try:
        REDIS_CONN.setex(not_anomalous_dict_key, not_anomalous_dict_key_ttl, str(not_anomalous_dict))
        logger.info('get_mirage_not_anomalous_metrics :: created Redis key - %s with %s TTL' % (
            not_anomalous_dict_key, str(not_anomalous_dict_key_ttl)))
    except:
        logger.error(traceback.format_exc())
        logger.error('error :: get_mirage_not_anomalous_metrics :: failed to created Redis key - %s with %s TTL' % (
            not_anomalous_dict_key, str(not_anomalous_dict_key_ttl)))
    if not base_name:
        recent_not_anomalous_dict_key = 'panorama.not_anomalous_dict.recent'
        recent_not_anomalous_dict_key_ttl = 180
        try:
            REDIS_CONN.setex(recent_not_anomalous_dict_key, recent_not_anomalous_dict_key_ttl, str(not_anomalous_dict))
            logger.info('get_mirage_not_anomalous_metrics :: created Redis key - %s with %s TTL' % (
                recent_not_anomalous_dict_key, str(recent_not_anomalous_dict_key_ttl)))
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: get_mirage_not_anomalous_metrics :: failed to created Redis key - %s with %s TTL' % (
                recent_not_anomalous_dict_key, str(recent_not_anomalous_dict_key_ttl)))
    anomalies_dict_key = 'panorama.anomalies_dict.%s.%s' % (
        str(from_timestamp), str(until_timestamp))
    anomalies_dict_key_ttl = 600
    if base_name:
        anomalies_dict_key = 'panorama.anomalies_dict.%s.%s.%s' % (
            str(from_timestamp), str(until_timestamp), base_name)
        anomalies_dict_key_ttl = 600
    try:
        REDIS_CONN.setex(anomalies_dict_key, anomalies_dict_key_ttl, str(anomalies_dict))
        logger.info('get_mirage_not_anomalous_metrics :: created Redis key - %s with %s TTL' % (
            anomalies_dict_key, str(anomalies_dict_key_ttl)))
    except:
        logger.error(traceback.format_exc())
        logger.error('error :: get_mirage_not_anomalous_metrics :: failed to created Redis key - %s with %s TTL' % (
            anomalies_dict_key, str(anomalies_dict_key_ttl)))
    if not base_name:
        # @modified - use the anomalies_dict recent key name, previously the
        # not_anomalous_dict.recent key was reused here which clobbered the
        # not anomalous data saved above with the anomalies data
        recent_anomalies_dict_key = 'panorama.anomalies_dict.recent'
        recent_anomalies_dict_key_ttl = 180
        try:
            REDIS_CONN.setex(recent_anomalies_dict_key, recent_anomalies_dict_key_ttl, str(anomalies_dict))
            logger.info('get_mirage_not_anomalous_metrics :: created Redis key - %s with %s TTL' % (
                recent_anomalies_dict_key, str(recent_anomalies_dict_key_ttl)))
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: get_mirage_not_anomalous_metrics :: failed to created Redis key - %s with %s TTL' % (
                recent_anomalies_dict_key, str(recent_anomalies_dict_key_ttl)))
    return not_anomalous_dict, anomalies_dict
# @added 20210328 - Feature #3994: Panorama - mirage not anomalous
def plot_not_anomalous_metric(not_anomalous_dict, anomalies_dict, plot_type):
"""
Plot the metric not anomalous or anomalies graph and return the file path
:param not_anomalous_dict: the dictionary of not anomalous events for the
metric
:param anomalies_dict: the dictionary of anomalous events for the
metric
:type not_anomalous_dict: dict
:type anomalies_dict: dict
:type plot_type: str ('not_anomalous' or 'anomalies')
:return: path and filename
:rtype: str
"""
fail_msg = None
trace = None
metric = None
from_timestamp = None
until_timestamp = None
try:
metric = list(not_anomalous_dict.keys())[0]
from_timestamp = not_anomalous_dict[metric]['from']
until_timestamp = not_anomalous_dict[metric]['until']
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: plot_not_anomalous_metric :: failed to get details for plot from not_anomalous_dict'
logger.error('%s' % fail_msg)
raise # to webapp to return in the UI
if not metric or not from_timestamp or not until_timestamp or not plot_type:
fail_msg = 'error :: plot_not_anomalous_metric :: failed to get details for plot'
logger.error('%s' % fail_msg)
raise # to webapp to return in the UI
try:
timeseries = get_graphite_metric(
skyline_app, metric, from_timestamp, until_timestamp, 'list',
'object')
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: plot_not_anomalous_metric :: failed to get timeseries from Graphite for details for %s' % metric
logger.error('%s' % fail_msg)
raise # to webapp to return in the UI
if plot_type == 'not_anomalous':
data_dict = not_anomalous_dict
if plot_type == 'anomalies':
data_dict = anomalies_dict
plot_timestamps = list(data_dict[metric]['timestamps'].keys())
logger.info('plot_not_anomalous_metric :: building not %s timeseries' % plot_type)
plot_timeseries = []
last_timestamp = None
a_timestamps_done = []
for timestamp, value in timeseries:
anomaly = 0
if not last_timestamp:
last_timestamp = int(timestamp)
plot_timeseries.append([int(timestamp), anomaly])
continue
for a_timestamp in plot_timestamps:
if a_timestamp < last_timestamp:
continue
if a_timestamp > int(timestamp):
continue
if a_timestamp in a_timestamps_done:
continue
if a_timestamp in list(range(last_timestamp, int(timestamp))):
anomaly = 1
a_timestamps_done.append(a_timestamp)
plot_timeseries.append([int(timestamp), anomaly])
logger.info('plot_not_anomalous_metric :: created %s timeseries' % plot_type)
logger.info('plot_not_anomalous_metric :: creating timeseries dataframe')
try:
df = pd.DataFrame(timeseries, columns=['date', 'value'])
df['date'] = pd.to_datetime(df['date'], unit='s')
datetime_index = pd.DatetimeIndex(df['date'].values)
df = df.set_index(datetime_index)
df.drop('date', axis=1, inplace=True)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: plot_not_anomalous_metric :: failed create timeseries dataframe to plot %s' % metric
logger.error('%s' % fail_msg)
raise # to webapp to return in the UI
logger.info('plot_not_anomalous_metric :: creating %s dataframe' % plot_type)
try:
plot_df = pd.DataFrame(plot_timeseries, columns=['date', 'value'])
plot_df['date'] = pd.to_datetime(plot_df['date'], unit='s')
datetime_index = pd.DatetimeIndex(plot_df['date'].values)
plot_df = plot_df.set_index(datetime_index)
plot_df.drop('date', axis=1, inplace=True)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: plot_not_anomalous_metric :: failed create not anomalous dataframe to plot %s' % metric
logger.error('%s' % fail_msg)
raise # to webapp to return in the UI
try:
logger.info('plot_not_anomalous_metric :: loading plot from adtk.visualization')
from adtk.visualization import plot
sane_metricname = filesafe_metricname(str(metric))
save_to_file = '%s/panorama/not_anomalous/%s/%s.%s.%s.%s.png' % (
settings.SKYLINE_TMP_DIR, sane_metricname, plot_type,
str(from_timestamp),
str(until_timestamp), sane_metricname)
save_to_path = path.dirname(save_to_file)
if plot_type == 'not_anomalous':
title = 'Not anomalous analysis\n%s' % metric
if plot_type == 'anomalies':
title = 'Anomalies\n%s' % metric
if not path.exists(save_to_path):
try:
mkdir_p(save_to_path)
except Exception as e:
logger.error('error :: plot_not_anomalous_metric :: failed to create dir - %s - %s' % (
save_to_path, e))
if path.exists(save_to_path):
try:
logger.info('plot_not_anomalous_metric :: plotting')
if plot_type == 'not_anomalous':
plot(
df, anomaly=plot_df, anomaly_color='green', title=title,
ts_markersize=1, anomaly_alpha=0.4, legend=False,
save_to_file=save_to_file)
if plot_type == 'anomalies':
plot(
df, anomaly=plot_df, anomaly_color='red', title=title,
ts_markersize=1, anomaly_alpha=1, legend=False,
save_to_file=save_to_file)
logger.debug('debug :: plot_not_anomalous_metric :: plot saved to - %s' % (
save_to_file))
except Exception as e:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: plot_not_anomalous_metric :: failed to plot - %s: %s' % (str(metric), e)
logger.error('%s' % fail_msg)
raise # to webapp to return in the UI
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: plot_not_anomalous_metric :: plotting %s for %s' % (str(metric), plot_type)
logger.error('%s' % fail_msg)
raise # to webapp to return in the UI
if not path.isfile(save_to_file):
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: plot_not_anomalous_metric :: plotting %s for %s to %s failed' % (
str(metric), plot_type, save_to_file)
logger.error('%s' % fail_msg)
raise # to webapp to return in the UI
else:
try:
REDIS_CONN.hset('panorama.not_anomalous_plots', time.time(), save_to_file)
logger.info('plot_not_anomalous_metric :: set Redis hash in panorama.not_anomalous_plots for clean up')
except:
logger.error(traceback.format_exc())
logger.error('error :: plot_not_anomalous_metric :: failed to set save_to_file in Redis hash - panorama.not_anomalous_plots')
return save_to_file
# @added 20210617 - Feature #4144: webapp - stale_metrics API endpoint
# Feature #4076: CUSTOM_STALE_PERIOD
# Branch #1444: thunder
def namespace_stale_metrics(namespace, cluster_data, exclude_sparsely_populated):
    """
    Determine the stale metrics for a namespace and return a dictionary of
    the form::

        {'stale_metrics': {
            'namespace': <str>,
            'total_metrics_count': <int>,
            'stale_metrics': {<base_name>: {'last_timestamp': <ts>,
                                            'stale_for': <seconds>}, ...},
            'stale_metrics_count': <int>}}

    :param namespace: the namespace to report on, or 'all' for every
        namespace
    :param cluster_data: whether to also fetch stale metrics from the
        remote Skyline instances declared in settings.REMOTE_SKYLINE_INSTANCES
    :param exclude_sparsely_populated: passed through on the remote
        stale_metrics API requests
    :type namespace: str
    :type cluster_data: boolean
    :type exclude_sparsely_populated: boolean
    :return: namespaces_namespace_stale_metrics_dict
    :rtype: dict
    """
    fail_msg = None
    trace = None
    namespaces_namespace_stale_metrics_dict = {}
    namespaces_namespace_stale_metrics_dict['stale_metrics'] = {}
    unique_base_names = []
    # All known metric base names, used to compute total_metrics_count
    try:
        REDIS_CONN_DECODED = get_redis_conn_decoded(skyline_app)
        unique_base_names = list(REDIS_CONN_DECODED.smembers('aet.analyzer.unique_base_names'))
        logger.info('%s namespaces checked for stale metrics discovered with thunder_stale_metrics' % (
            str(len(unique_base_names))))
    except Exception as e:
        fail_msg = 'error :: Webapp error with api?stale_metrics - %s' % e
        logger.error(fail_msg)
        raise
    now = int(time.time())
    namespace_stale_metrics_dict = {}
    namespace_recovered_metrics_dict = {}
    # Local (this instance) stale metrics, keyed by parent namespace.
    # NOTE(review): namespace_recovered_metrics_dict is returned by
    # thunder_stale_metrics but never used below.
    try:
        namespace_stale_metrics_dict, namespace_recovered_metrics_dict = thunder_stale_metrics(skyline_app, log=True)
    except Exception as e:
        fail_msg = 'error :: Webapp error with api?stale_metrics - %s' % e
        logger.error(fail_msg)
        raise
    logger.info('%s namespaces checked for stale metrics discovered with thunder_stale_metrics' % (
        str(len(namespace_stale_metrics_dict))))
    remote_stale_metrics_dicts = []
    # On a cluster, gather the same data from every remote Skyline instance
    # (best effort - failures are logged, not raised)
    if settings.REMOTE_SKYLINE_INSTANCES and cluster_data:
        exclude_sparsely_populated_str = 'false'
        if exclude_sparsely_populated:
            exclude_sparsely_populated_str = 'true'
        remote_namespaces_namespace_stale_metrics_dicts = []
        stale_metrics_uri = 'stale_metrics=true&namespace=%s&exclude_sparsely_populated=%s' % (
            str(namespace), str(exclude_sparsely_populated_str))
        try:
            remote_namespaces_namespace_stale_metrics_dicts = get_cluster_data(stale_metrics_uri, 'stale_metrics')
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: Webapp could not get remote_namespaces_namespace_stale_metrics_dict from the remote Skyline instances')
        if remote_namespaces_namespace_stale_metrics_dicts:
            logger.info('got %s remote namespace_stale_metrics_dicts instances from the remote Skyline instances' % str(len(remote_namespaces_namespace_stale_metrics_dicts)))
            remote_stale_metrics_dicts = remote_namespaces_namespace_stale_metrics_dicts
    stale_metrics_count = 0
    total_metrics_count = len(unique_base_names)
    # Report on every namespace: merge local and remote stale metrics
    if namespace == 'all':
        namespaces_namespace_stale_metrics_dict['stale_metrics']['namespace'] = 'all'
        if remote_stale_metrics_dicts:
            for remote_stale_metrics_dict in remote_stale_metrics_dicts:
                total_metrics_count = total_metrics_count + remote_stale_metrics_dict['total_metrics_count']
        namespaces_namespace_stale_metrics_dict['stale_metrics']['total_metrics_count'] = total_metrics_count
        namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics'] = {}
        if namespace_stale_metrics_dict:
            for parent_namespace in list(namespace_stale_metrics_dict.keys()):
                for base_name in list(namespace_stale_metrics_dict[parent_namespace]['metrics'].keys()):
                    stale_metrics_count += 1
                    namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics'][base_name] = {}
                    # The local dict stores the last data timestamp per metric
                    last_timestamp = namespace_stale_metrics_dict[parent_namespace]['metrics'][base_name]
                    stale_for = now - int(float(last_timestamp))
                    namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics'][base_name]['last_timestamp'] = last_timestamp
                    namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics'][base_name]['stale_for'] = stale_for
        if remote_stale_metrics_dicts:
            for remote_stale_metrics_dict in remote_stale_metrics_dicts:
                for base_name in list(remote_stale_metrics_dict['stale_metrics'].keys()):
                    stale_metrics_count += 1
                    namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics'][base_name] = {}
                    # Remote instances have already computed stale_for
                    last_timestamp = remote_stale_metrics_dict['stale_metrics'][base_name]['last_timestamp']
                    stale_for = remote_stale_metrics_dict['stale_metrics'][base_name]['stale_for']
                    namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics'][base_name]['last_timestamp'] = last_timestamp
                    namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics'][base_name]['stale_for'] = stale_for
        namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics_count'] = stale_metrics_count
    # Report on a single namespace: filter local metrics by the namespace
    # prefix; remote results were already requested for this namespace
    if namespace_stale_metrics_dict and namespace != 'all':
        namespaces_namespace_stale_metrics_dict['stale_metrics']['namespace'] = namespace
        total_metrics_count = len([base_name for base_name in unique_base_names if base_name.startswith(namespace)])
        if remote_stale_metrics_dicts:
            for remote_stale_metrics_dict in remote_stale_metrics_dicts:
                total_metrics_count = total_metrics_count + remote_stale_metrics_dict['total_metrics_count']
        namespaces_namespace_stale_metrics_dict['stale_metrics']['total_metrics_count'] = total_metrics_count
        namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics'] = {}
        top_level_namespace = namespace.split('.')[0]
        if namespace_stale_metrics_dict:
            for parent_namespace in list(namespace_stale_metrics_dict.keys()):
                # Local results are keyed by top level namespace only
                if parent_namespace != top_level_namespace:
                    continue
                for base_name in list(namespace_stale_metrics_dict[parent_namespace]['metrics'].keys()):
                    if not base_name.startswith(namespace):
                        continue
                    stale_metrics_count += 1
                    namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics'][base_name] = {}
                    last_timestamp = namespace_stale_metrics_dict[parent_namespace]['metrics'][base_name]
                    stale_for = now - int(float(last_timestamp))
                    namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics'][base_name]['last_timestamp'] = last_timestamp
                    namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics'][base_name]['stale_for'] = stale_for
        if remote_stale_metrics_dicts:
            for remote_stale_metrics_dict in remote_stale_metrics_dicts:
                for base_name in list(remote_stale_metrics_dict['stale_metrics'].keys()):
                    stale_metrics_count += 1
                    namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics'][base_name] = {}
                    last_timestamp = remote_stale_metrics_dict['stale_metrics'][base_name]['last_timestamp']
                    stale_for = remote_stale_metrics_dict['stale_metrics'][base_name]['stale_for']
                    namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics'][base_name]['last_timestamp'] = last_timestamp
                    namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics'][base_name]['stale_for'] = stale_for
        namespaces_namespace_stale_metrics_dict['stale_metrics']['stale_metrics_count'] = stale_metrics_count
    return namespaces_namespace_stale_metrics_dict
|
from .BaseRelationship import BaseRelationship
class MorphTo(BaseRelationship):
    """Polymorphic "morph to" relationship descriptor.

    The owning record stores the related model's identity in two columns:
    ``morph_key`` holds the value that maps (via the class-level morph map)
    to a model class, and ``morph_id`` holds that model's primary key value.
    """

    # Shared mapping of morph-key values to model classes, populated via
    # set_morph_map().
    _morph_map = {}

    def __init__(self, fn, morph_key="record_type", morph_id="record_id"):
        """Accept either the decorated method or positional column names.

        When used as ``@MorphTo("type_col", "id_col")`` the first positional
        argument is actually the morph key, not a callable.
        """
        if isinstance(fn, str):
            # BUG FIX: the original did `self.fn = fn = None` first, which
            # rebound `fn` to None and therefore stored None as morph_key.
            # Capture the string before clearing fn.
            self.fn = None
            self.morph_key = fn
            self.morph_id = morph_key
        else:
            self.fn = fn
            self.morph_id = morph_id
            self.morph_key = morph_key

    def get_builder(self):
        """Return the builder captured from the owning model instance."""
        return self._related_builder

    def set_keys(self, owner, attribute):
        """Fall back to the default column names if either key is unset."""
        self.morph_id = self.morph_id or "record_id"
        self.morph_key = self.morph_key or "record_type"
        return self

    def __get__(self, instance, owner):
        """This method is called when the decorated method is accessed.

        Arguments:
            instance {object|None} -- The instance we called.
                If we didn't call the attribute and only accessed it then this will be None.
            owner {object} -- The current model that the property was accessed on.

        Returns:
            object -- Either returns a builder or a hydrated model.
        """
        self._related_builder = instance.builder
        self.set_keys(owner, self.fn)
        if instance.is_loaded():
            # Serve the cached relationship when it was already hydrated.
            if self.morph_key in instance._relationships:
                return instance._relationships[self.morph_key]
            result = self.apply_query(self._related_builder, instance)
            return result
        else:
            return self

    def __getattr__(self, attribute):
        # Delegate unknown attributes to the related model's builder.
        relationship = self.fn(self)()
        return getattr(relationship.builder, attribute)

    def apply_query(self, builder, instance):
        """Apply the query and return a dictionary to be hydrated.

        Arguments:
            builder {object} -- The relationship object
            instance {object} -- The current model object.

        Returns:
            dict -- A dictionary of data which will be hydrated.
        """
        # Resolve the concrete model class from the morph map, then fetch
        # the single record matching the stored primary key value.
        model = self.morph_map().get(instance.__attributes__[self.morph_key])
        record = instance.__attributes__[self.morph_id]
        return model.where(model.get_primary_key(), record).first()

    def get_related(self, query, relation, eagers=None):
        """Gets the relation needed between the relation and the related builder. If the relation is a collection
        then will need to pluck out all the keys from the collection and fetch from the related builder. If
        relation is just a Model then we can just call the model based on the value of the related
        builders primary key.

        Args:
            relation (Model|Collection):

        Returns:
            Model|Collection
        """
        raise NotImplementedError

    def register_related(self, key, model, collection):
        raise NotImplementedError

    def morph_map(self):
        """Return the shared morph-key -> model-class mapping."""
        return self._morph_map

    @classmethod
    def set_morph_map(cls, morph_map):
        """Install the morph map used to resolve model classes; returns cls
        so calls can be chained."""
        cls._morph_map = morph_map
        return cls
|
"""
The tsnet.utils.valve_curve contains function to define
valve characteristics curve, gate valve by default.
"""
import numpy as np
def valve_curve(s, coeff=None):
    """Return the valve friction coefficient at a given open percentage.

    Parameters
    ----------
    s : float or array_like
        Open percentage (0 to 100).
    coeff : tuple of array_like, optional
        ``(percent_open, kl)`` pair describing a custom characteristic
        curve, with ``percent_open`` in descending order (matching the
        default curve's layout). Defaults to a gate valve curve.

    Returns
    -------
    k : float or ndarray
        Friction coefficient interpolated at ``s``.
    """
    # BUG FIX: use `is None` rather than `== None` (identity test; also
    # avoids elementwise comparison if coeff is array-like).
    if coeff is None:
        percent_open = np.linspace(100, 0, 11)
        # loss coefficients for a gate valve
        kl = [1/0.2, 2.50, 1.25, 0.625, 0.333, 0.17,
              0.100, 0.0556, 0.0313, 0.0167, 0.0]
    else:
        percent_open, kl = coeff
    # np.interp requires ascending x values, hence the reversal.
    k = np.interp(s, percent_open[::-1], kl[::-1])
    return k
|
import xml.etree.ElementTree as ET
import requests
# Per-residue usage counts and per-residue chemical component reports.
COUNTS_URL = "http://ligand-expo.rcsb.org/dictionaries/cc-counts.tdd"
RESIDUES_URL = "http://ligand-expo.rcsb.org/reports/{0}/{1}/{1}.xml"
# Residues seen fewer than this many times in the PDB are omitted.
MIN_COUNT = 500
# Map PDB value_order strings to integer bond orders.
PDB_ORDERS = {"sing": 1, "doub": 2, "trip": 3, "quad": 4}

response = requests.get(COUNTS_URL)
# BUG FIX: `assert` is stripped under `python -O`; raise on HTTP errors
# unconditionally instead.
response.raise_for_status()
print("extern ResiduesTable const mol::internal::RESIDUES_TABLE({")
for line in response.text.splitlines()[1:]:
    resname, count = line.split()
    # The counts file is presumably sorted by decreasing count (the loop
    # breaks rather than continues) — stop at the first rare residue.
    if int(count) < MIN_COUNT:
        break
    # UNX is the "unknown" placeholder residue; skip it.
    if resname == "UNX":
        continue
    response = requests.get(RESIDUES_URL.format(resname[0], resname))
    response.raise_for_status()
    xml_root = ET.fromstring(response.text)
    name = xml_root.findall("{*}chem_compCategory/{*}chem_comp/{*}name")[0].text
    # Collect atoms, remembering each atom's index for bond references.
    atoms = []
    atom_ids = {}
    for i, atom_tag in enumerate(xml_root.iterfind("{*}chem_comp_atomCategory/*")):
        atom_ids[atom_tag.attrib["atom_id"]] = i
        atoms.append('{{"{}", {:d}}}'.format(atom_tag.attrib["atom_id"], i))
    bonds = []
    for bond_tag in xml_root.iterfind("{*}chem_comp_bondCategory/*"):
        atom1 = atom_ids[bond_tag.attrib["atom_id_1"]]
        atom2 = atom_ids[bond_tag.attrib["atom_id_2"]]
        # Unrecognised orders default to a single bond.
        bond_order = PDB_ORDERS.get(bond_tag.find("{*}value_order").text, 1)
        if bond_tag.find("{*}pdbx_aromatic_flag").text == "Y":
            is_aromatic = "true"
        else:
            is_aromatic = "false"
        bonds.append('{{{}, {:d}, {:d}, {:d}}}'.format(is_aromatic, bond_order, atom1, atom2))
    # Emit one C++ ResiduesTable entry per residue.
    print(" // {}".format(name))
    print(' {{"{}",'.format(resname))
    print(" // Atoms")
    print(" {{", end="")
    print(",\n ".join(atoms), end="")
    print("},")
    print(" // Bonds")
    print(" {", end="")
    print(",\n ".join(bonds), end="")
    print("}}},")
print("});")
|
from pathlib import Path
import textwrap
import numpy as np
import random
import matplotlib.pyplot as plt
from typing import List, AnyStr, Tuple
# NOTE(review): the 'seaborn' style name was deprecated in matplotlib 3.6
# and removed later (renamed 'seaborn-v0_8') — confirm the pinned
# matplotlib version still accepts it.
plt.style.use('seaborn')
# Shared text properties for plot annotations and tick labels; 'name' is
# matplotlib's alias for the font name within the 'serif' family.
medium_font = {'family': 'serif',
               'name': 'Helvetica',
               'size': 16}
# Larger bold variant used for titles and axis labels.
large_font = {'family': 'serif',
              'name': 'Helvetica',
              'weight': 'bold',
              'size': 18}
def color_map(num: int, cmap: str):
    """Sample ``num`` evenly spaced colors from the named colormap."""
    palette = plt.get_cmap(cmap)
    return [palette(index / num) for index in range(num)]
class Plotter:
    """Save pie charts and histograms as PNG files below ``out_path``.

    The output directory is created on demand; each plotting method renders
    a figure and writes it via :meth:`save`.
    """

    def __init__(self, out_path: str, fig_width: int = 20, fig_height: int = 10):
        self.out_path = Path(out_path)
        self.fig_size = (fig_width, fig_height)
        if not self.out_path.exists():
            # ROBUSTNESS: parents/exist_ok tolerate nested paths and
            # concurrent creation (a bare mkdir() raised in both cases).
            self.out_path.mkdir(parents=True, exist_ok=True)
        plt.figure(figsize=self.fig_size)

    def save(self, file_name: str):
        """Write the current figure under out_path and clear it."""
        plt.savefig(f"{self.out_path / Path(file_name)}", bbox_inches='tight')
        plt.clf()

    def pie(self, data: List[int], labels: List[AnyStr], cmap: str, filename: str = 'pie'):
        """Render a donut chart with callout labels and save it.

        Labels longer than 25 characters are wrapped; wedges with a value
        below 2 get no callout. Data/label pairs are shuffled so adjacent
        wedges receive visually distinct colors.
        """
        labels = ['\n'.join(textwrap.wrap(cn, width=25)) if len(cn) > 25 else cn for cn in labels]
        pairs = list(zip(data, labels))
        random.shuffle(pairs)
        data, labels = zip(*pairs)
        fig, ax = plt.subplots(figsize=(20, 10), subplot_kw=dict(aspect="equal"))
        colors = color_map(len(data), cmap)
        ax.set_prop_cycle(color=colors)
        explode = [0.05] * len(labels)
        wedges, texts, autotexts = ax.pie(data, wedgeprops=dict(width=0.4), startangle=-30, explode=explode,
                                          autopct="%.1f%%", pctdistance=0.85)
        plt.setp(autotexts, size=20, weight="bold")
        bbox_props = dict(boxstyle="square,pad=0.2", fc="w", ec="k", lw=0.8)
        kw = dict(arrowprops=dict(arrowstyle="-"),
                  bbox=bbox_props, zorder=0, va="center")
        for i, p in enumerate(wedges):
            # Skip callouts for tiny wedges to avoid clutter.
            if data[i] < 2:
                continue
            # Anchor the callout near the wedge's angular midpoint (biased
            # towards theta2 by the /1.5 factor).
            ang = (p.theta2 - p.theta1) / 1.5 + p.theta1
            y = np.sin(np.deg2rad(ang))
            x = np.cos(np.deg2rad(ang))
            horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
            connectionstyle = "angle,angleA=0,angleB={}".format(ang)
            kw["arrowprops"].update({"connectionstyle": connectionstyle})
            ax.annotate(labels[i], xy=(x, y), xytext=(1.15 * np.sign(x), 1.15 * y), rotation_mode="anchor",
                        horizontalalignment=horizontalalignment, **kw, fontsize=22)
        self.save(filename)

    # source: https://medium.com/@arseniytyurin/how-to-make-your-histogram-shine-69e432be39ca
    def histogram(self, data: List[int], title: str, x_label: str, y_label: str, cmap: str, filename: str = 'hist',
                  binwidth: float = 1):
        """Render a colormap-shaded histogram with per-bar counts and save it."""
        fig, ax = plt.subplots(figsize=(20, 10))
        bins = np.arange(min(data), max(data) + binwidth, binwidth)
        n, bins, patches = ax.hist(data, bins=bins, facecolor='#2ab0ff', edgecolor='#e0e0e0', rwidth=1,
                                   linewidth=1, alpha=0.8, align='left')
        colors = color_map(len(patches), cmap)
        # Recolor each bar and print its count above it.
        for i, color in enumerate(colors):
            patches[i].set_facecolor(color)
            height = patches[i].get_height()
            if height > 0:
                ax.annotate(f'{int(height)}', xy=(patches[i].get_x() + patches[i].get_width() / 2, height),
                            xytext=(0, 5), textcoords='offset points', ha='center', va='bottom', **medium_font)
        plt.title(title, **large_font)
        # Rotate tick labels when values get wide enough to overlap.
        if bins[-1] > 1000:
            plt.xticks(bins, **medium_font, rotation=-30, ha="right")
        else:
            plt.xticks(bins, **medium_font)
        plt.yticks(**medium_font)
        # BUG FIX: the original passed fontsize=10 together with large_font's
        # 'size' entry; 'fontsize' is an alias of 'size' and matplotlib
        # rejects duplicated alias kwargs.
        plt.xlabel(x_label, **large_font)
        plt.ylabel(y_label, **large_font)
        self.save(filename)
|
def main():
    """Prompt for age and citizenship and report eligibility.

    Drinking requires age >= 21; voting requires citizenship and
    age >= 18 (US rules). Citizenship input is case-insensitive and
    anything other than 'y' counts as No.
    """
    age = int(input('How old are you? '))
    is_citizen = (input('Are you a citizen? Y or N ').lower() == 'y')
    if age >= 21 and is_citizen:
        print('You can vote and drink.')
    elif age >= 21:
        print('You can drink, but can\'t vote.')
    elif age >= 18 and is_citizen:
        print('You can vote, but can\'t drink.')
    else:
        print('You cannot vote or drink.')


# BUG FIX: guard the entry point so importing this module does not
# immediately prompt for input.
if __name__ == '__main__':
    main()
class Solution:
    def findKthPositive(self, arr: List[int], k: int) -> int:
        """Return the k-th positive integer missing from the sorted,
        strictly increasing array ``arr``."""
        pos = 0
        candidate = 1
        # Walk candidates 1, 2, 3, ... consuming matching array entries;
        # every candidate absent from arr counts towards k.
        while pos < len(arr):
            if arr[pos] != candidate:
                k -= 1
                if k == 0:
                    return candidate
            else:
                pos += 1
            candidate += 1
        # The whole array was consumed: the remaining missing numbers
        # follow arr[-1] consecutively.
        return arr[-1] + k
|
import configparser
import argparse
import sys
import os
sys.path.append(os.path.dirname(__file__))
import modules.deamon as daemon
def main():
    """Parse CLI arguments, load the daemon config and dispatch the
    start/stop/restart command to the collector daemon.

    Exits with status 2 when the command is not recognised.
    """
    # Argument parser
    parser = argparse.ArgumentParser()
    parser.add_argument("command", help="command, given to daemon. Possible options: start|stop|restart", type=str)
    parser.add_argument("-c", "--config", help="config file path", type=str)
    args = parser.parse_args()
    # Use the config given with -c, falling back to the system default path.
    config = configparser.ConfigParser()
    if args.config:
        config_path = args.config
    else:
        config_path = os.path.abspath(os.path.join('/etc', 'system-health-daemon', 'daemon.cfg'))
    # read_file (unlike config.read) raises if the file cannot be opened.
    with open(config_path, 'r') as file:
        config.read_file(file)
    daemon_obj = daemon.CollectorDaemon(config['Collector'])
    if args.command == 'start':
        daemon_obj.start()
    elif args.command == 'stop':
        daemon_obj.stop()
    elif args.command == 'restart':
        daemon_obj.restart()
    else:
        # BUG FIX: argparse's Namespace has no 'help' attribute, so the
        # original `print(args.help)` raised AttributeError. Print the
        # parser usage instead.
        parser.print_help()
        sys.exit(2)
if __name__ == "__main__":
main() |
cities = [
'Accord',
'Acra',
'Adams',
'Adams Basin',
'Adams Center',
'Addison',
'Adirondack',
'Afton',
'Akron',
'Alabama',
'Albany',
'Albertson',
'Albion',
'Alcove',
'Alden',
'Alder Creek',
'Alexander',
'Alexandria Bay',
'Alfred',
'Alfred Station',
'Allegany',
'Allentown',
'Alma',
'Almond',
'Alpine',
'Alplaus',
'Altamont',
'Altmar',
'Alton',
'Altona',
'Amagansett',
'Amawalk',
'Amenia',
'Amityville',
'Amsterdam',
'Ancram',
'Ancramdale',
'Andes',
'Andover',
'Angelica',
'Angola',
'Annandale On Hudson',
'Antwerp',
'Apalachin',
'Appleton',
'Apulia Station',
'Aquebogue',
'Arcade',
'Arden',
'Ardsley',
'Ardsley On Hudson',
'Argyle',
'Arkport',
'Arkville',
'Armonk',
'Arverne',
'Ashland',
'Ashville',
'Astoria',
'Athens',
'Athol',
'Athol Springs',
'Atlanta',
'Atlantic Beach',
'Attica',
'Au Sable Forks',
'Auburn',
'Auriesville',
'Aurora',
'Austerlitz',
'Ava',
'Averill Park',
'Avoca',
'Avon',
'Babylon',
'Bainbridge',
'Bakers Mills',
'Baldwin',
'Baldwin Place',
'Baldwinsville',
'Ballston Lake',
'Ballston Spa',
'Bangall',
'Barker',
'Barneveld',
'Barrytown',
'Barryville',
'Barton',
'Basom',
'Batavia',
'Bath',
'Bay Shore',
'Bayport',
'Bayside',
'Bayville',
'Beacon',
'Bear Mountain',
'Bearsville',
'Beaver Dams',
'Beaver Falls',
'Bedford',
'Bedford Hills',
'Belfast',
'Bellerose',
'Belleville',
'Bellmore',
'Bellona',
'Bellport',
'Bellvale',
'Belmont',
'Bemus Point',
'Bergen',
'Berkshire',
'Berlin',
'Berne',
'Bernhards Bay',
'Bethel',
'Bethpage',
'Bible School Park',
'Big Flats',
'Big Indian',
'Billings',
'Binghamton',
'Black Creek',
'Black River',
'Blauvelt',
'Bliss',
'Blodgett Mills',
'Bloomfield',
'Blooming Grove',
'Bloomingburg',
'Bloomingdale',
'Bloomington',
'Bloomville',
'Blossvale',
'Blue Mountain Lake',
'Blue Point',
'Bohemia',
'Boiceville',
'Bolivar',
'Bolton Landing',
'Bombay',
'Boonville',
'Boston',
'Bouckville',
'Bovina Center',
'Bowmansville',
'Bradford',
'Brainard',
'Brainardsville',
'Branchport',
'Brant',
'Brant Lake',
'Brantingham',
'Brasher Falls',
'Breesport',
'Breezy Point',
'Brentwood',
'Brewerton',
'Brewster',
'Briarcliff Manor',
'Bridgehampton',
'Bridgeport',
'Bridgewater',
'Brier Hill',
'Brightwaters',
'Broadalbin',
'Brockport',
'Brocton',
'Bronx',
'Bronxville',
'Brookfield',
'Brookhaven',
'Brooklyn',
'Brooktondale',
'Brownville',
'Brushton',
'Buchanan',
'Buffalo',
'Bullville',
'Burdett',
'Burke',
'Burlingham',
'Burlington Flats',
'Burnt Hills',
'Burt',
'Buskirk',
'Byron',
'Cadyville',
'Cairo',
'Calcium',
'Caledonia',
'Callicoon',
'Callicoon Center',
'Calverton',
'Cambria Heights',
'Cambridge',
'Camden',
'Cameron',
'Cameron Mills',
'Camillus',
'Campbell',
'Campbell Hall',
'Canaan',
'Canadice',
'Canajoharie',
'Canandaigua',
'Canaseraga',
'Canastota',
'Candor',
'Caneadea',
'Canisteo',
'Canton',
'Cape Vincent',
'Carle Place',
'Carlisle',
'Carmel',
'Caroga Lake',
'Carthage',
'Cassadaga',
'Cassville',
'Castile',
'Castle Creek',
'Castle Point',
'Castleton On Hudson',
'Castorland',
'Cato',
'Catskill',
'Cattaraugus',
'Cayuga',
'Cayuta',
'Cazenovia',
'Cedarhurst',
'Celoron',
'Center Moriches',
'Centereach',
'Centerport',
'Centerville',
'Central Bridge',
'Central Islip',
'Central Square',
'Central Valley',
'Ceres',
'Chadwicks',
'Chaffee',
'Champlain',
'Chappaqua',
'Charlotteville',
'Chase Mills',
'Chateaugay',
'Chatham',
'Chaumont',
'Chautauqua',
'Chazy',
'Chelsea',
'Chemung',
'Chenango Bridge',
'Chenango Forks',
'Cherry Creek',
'Cherry Plain',
'Cherry Valley',
'Chester',
'Chestertown',
'Chichester',
'Childwold',
'Chippewa Bay',
'Chittenango',
'Churchville',
'Churubusco',
'Cicero',
'Cincinnatus',
'Circleville',
'Clarence',
'Clarence Center',
'Clarendon',
'Clark Mills',
'Clarkson',
'Clarksville',
'Claryville',
'Claverack',
'Clay',
'Clayton',
'Clayville',
'Clemons',
'Cleveland',
'Cleverdale',
'Clifton Park',
'Clifton Springs',
'Climax',
'Clinton',
'Clinton Corners',
'Clintondale',
'Clockville',
'Clyde',
'Clymer',
'Cobleskill',
'Cochecton',
'Cochecton Center',
'Coeymans',
'Coeymans Hollow',
'Cohocton',
'Cohoes',
'Cold Brook',
'Cold Spring',
'Cold Spring Harbor',
'Colden',
'Colesville',
'College Point',
'Colliersville',
'Collins',
'Collins Center',
'Colton',
'Columbiaville',
'Commack',
'Comstock',
'Conesus',
'Conewango Valley',
'Congers',
'Conklin',
'Connelly',
'Constable',
'Constableville',
'Constantia',
'Coopers Plains',
'Cooperstown',
'Copake',
'Copake Falls',
'Copenhagen',
'Copiague',
'Coram',
'Corbettsville',
'Corfu',
'Corinth',
'Corning',
'Cornwall',
'Cornwall On Hudson',
'Cornwallville',
'Corona',
'Cortland',
'Cortlandt Manor',
'Cossayuna',
'Cottekill',
'Cowlesville',
'Coxsackie',
'Cragsmoor',
'Cranberry Lake',
'Craryville',
'Crittenden',
'Croghan',
'Crompond',
'Cropseyville',
'Cross River',
'Croton Falls',
'Croton On Hudson',
'Crown Point',
'Cuba',
'Cuddebackville',
'Cutchogue',
'Dale',
'Dalton',
'Dannemora',
'Dansville',
'Darien Center',
'Davenport',
'Davenport Center',
'Dayton',
'De Kalb Junction',
'De Lancey',
'De Peyster',
'De Ruyter',
'Deansboro',
'Deer Park',
'Deer River',
'Deferiet',
'Delanson',
'Delevan',
'Delhi',
'Delmar',
'Delphi Falls',
'Denmark',
'Denver',
'Depauville',
'Depew',
'Deposit',
'Derby',
'Dewittville',
'Dexter',
'Diamond Point',
'Dickinson Center',
'Dobbs Ferry',
'Dolgeville',
'Dormansville',
'Dover Plains',
'Downsville',
'Dresden',
'Dryden',
'Duanesburg',
'Dundee',
'Dunkirk',
'Durham',
'Durhamville',
'Eagle Bay',
'Eagle Bridge',
'Earlton',
'Earlville',
'East Amherst',
'East Aurora',
'East Berne',
'East Bethany',
'East Bloomfield',
'East Branch',
'East Chatham',
'East Concord',
'East Durham',
'East Elmhurst',
'East Greenbush',
'East Hampton',
'East Homer',
'East Islip',
'East Jewett',
'East Marion',
'East Meadow',
'East Meredith',
'East Moriches',
'East Nassau',
'East Northport',
'East Norwich',
'East Otto',
'East Pembroke',
'East Pharsalia',
'East Quogue',
'East Randolph',
'East Rochester',
'East Rockaway',
'East Schodack',
'East Setauket',
'East Springfield',
'East Syracuse',
'East Williamson',
'East Worcester',
'Eastchester',
'Eastport',
'Eaton',
'Eden',
'Edmeston',
'Edwards',
'Elba',
'Elbridge',
'Eldred',
'Elizabethtown',
'Elizaville',
'Elka Park',
'Ellenburg',
'Ellenburg Center',
'Ellenburg Depot',
'Ellenville',
'Ellicottville',
'Ellington',
'Ellisburg',
'Elma',
'Elmhurst',
'Elmira',
'Elmont',
'Elmsford',
'Endicott',
'Endwell',
'Erieville',
'Erin',
'Esopus',
'Esperance',
'Essex',
'Etna',
'Evans Mills',
'Fabius',
'Fair Haven',
'Fairport',
'Falconer',
'Fallsburg',
'Fancher',
'Far Rockaway',
'Farmersville Station',
'Farmingdale',
'Farmington',
'Farmingville',
'Farnham',
'Fayette',
'Fayetteville',
'Felts Mills',
'Ferndale',
'Feura Bush',
'Fillmore',
'Findley Lake',
'Fine',
'Fishers',
'Fishers Island',
'Fishers Landing',
'Fishkill',
'Fishs Eddy',
'Fleischmanns',
'Floral Park',
'Florida',
'Flushing',
'Fly Creek',
'Fonda',
'Forest Hills',
'Forestburgh',
'Forestport',
'Forestville',
'Fort Ann',
'Fort Covington',
'Fort Drum',
'Fort Edward',
'Fort Hunter',
'Fort Johnson',
'Fort Montgomery',
'Fort Plain',
'Frankfort',
'Franklin',
'Franklin Springs',
'Franklin Square',
'Franklinville',
'Fredonia',
'Freedom',
'Freehold',
'Freeport',
'Freeville',
'Fremont Center',
'Fresh Meadows',
'Frewsburg',
'Friendship',
'Fulton',
'Fultonham',
'Fultonville',
'Gabriels',
'Gainesville',
'Gallupville',
'Galway',
'Gansevoort',
'Garden City',
'Gardiner',
'Garnerville',
'Garrattsville',
'Garrison',
'Gasport',
'Geneseo',
'Geneva',
'Genoa',
'Georgetown',
'Germantown',
'Gerry',
'Getzville',
'Ghent',
'Gilbertsville',
'Gilboa',
'Glasco',
'Glen Aubrey',
'Glen Cove',
'Glen Head',
'Glen Oaks',
'Glen Spey',
'Glen Wild',
'Glenfield',
'Glenford',
'Glenham',
'Glenmont',
'Glens Falls',
'Glenwood',
'Glenwood Landing',
'Gloversville',
'Godeffroy',
'Goldens Bridge',
'Gorham',
'Goshen',
'Gouverneur',
'Gowanda',
'Grafton',
'Grahamsville',
'Grand Gorge',
'Grand Island',
'Granite Springs',
'Granville',
'Great Bend',
'Great Neck',
'Great River',
'Great Valley',
'Greene',
'Greenfield Center',
'Greenfield Park',
'Greenhurst',
'Greenlawn',
'Greenport',
'Greenvale',
'Greenville',
'Greenwich',
'Greenwood',
'Greenwood Lake',
'Greig',
'Groton',
'Groveland',
'Guilderland',
'Guilderland Center',
'Guilford',
'Hadley',
'Hagaman',
'Hague',
'Hailesboro',
'Haines Falls',
'Halcottsville',
'Hall',
'Hamburg',
'Hamden',
'Hamilton',
'Hamlin',
'Hammond',
'Hammondsport',
'Hampton',
'Hampton Bays',
'Hancock',
'Hankins',
'Hannacroix',
'Hannawa Falls',
'Hannibal',
'Harford',
'Harpersfield',
'Harpursville',
'Harriman',
'Harris',
'Harrison',
'Harrisville',
'Hartford',
'Hartsdale',
'Hartwick',
'Hastings',
'Hastings On Hudson',
'Hauppauge',
'Haverstraw',
'Hawthorne',
'Hector',
'Helena',
'Hemlock',
'Hempstead',
'Henderson',
'Henderson Harbor',
'Henrietta',
'Hensonville',
'Herkimer',
'Hermon',
'Heuvelton',
'Hewlett',
'Hicksville',
'High Falls',
'Highland',
'Highland Falls',
'Highland Lake',
'Highland Mills',
'Highmount',
'Hillburn',
'Hillsdale',
'Hilton',
'Himrod',
'Hinckley',
'Hinsdale',
'Hobart',
'Hoffmeister',
'Hogansburg',
'Holbrook',
'Holland',
'Holland Patent',
'Holley',
'Hollis',
'Hollowville',
'Holmes',
'Holtsville',
'Homer',
'Honeoye',
'Honeoye Falls',
'Hoosick',
'Hoosick Falls',
'Hopewell Junction',
'Hornell',
'Horseheads',
'Hortonville',
'Houghton',
'Howard Beach',
'Howells',
'Howes Cave',
'Hubbardsville',
'Hudson',
'Hudson Falls',
'Hughsonville',
'Huguenot',
'Huletts Landing',
'Hume',
'Hunt',
'Hunter',
'Huntington',
'Huntington Station',
'Hurley',
'Hurleyville',
'Hyde Park',
'Ilion',
'Indian Lake',
'Inlet',
'Interlaken',
'Inwood',
'Ionia',
'Irving',
'Irvington',
'Island Park',
'Islandia',
'Islip',
'Islip Terrace',
'Ithaca',
'Jackson Heights',
'Jacksonville',
'Jamaica',
'Jamesport',
'Jamestown',
'Jamesville',
'Jasper',
'Java Center',
'Java Village',
'Jay',
'Jefferson',
'Jefferson Valley',
'Jeffersonville',
'Jericho',
'Jewett',
'Johnsburg',
'Johnson',
'Johnson City',
'Johnsonville',
'Johnstown',
'Jordan',
'Jordanville',
'Kanona',
'Katonah',
'Kattskill Bay',
'Kauneonga Lake',
'Keene',
'Keene Valley',
'Keeseville',
'Kendall',
'Kennedy',
'Kenoza Lake',
'Kent',
'Kerhonkson',
'Keuka Park',
'Kew Gardens',
'Kiamesha Lake',
'Kill Buck',
'Killawog',
'Kinderhook',
'King Ferry',
'Kings Park',
'Kingston',
'Kirkville',
'Kirkwood',
'Knowlesville',
'Knox',
'Knoxboro',
'La Fargeville',
'La Fayette',
'La Grange',
'Lacona',
'Lagrangeville',
'Lake Clear',
'Lake George',
'Lake Grove',
'Lake Hill',
'Lake Huntington',
'Lake Katrine',
'Lake Luzerne',
'Lake Peekskill',
'Lake Placid',
'Lake Pleasant',
'Lake View',
'Lakemont',
'Lakeville',
'Lakewood',
'Lancaster',
'Lanesville',
'Lansing',
'Larchmont',
'Latham',
'Laurel',
'Laurens',
'Lawrence',
'Lawrenceville',
'Lawtons',
'Le Roy',
'Lee Center',
'Leeds',
'Leicester',
'Leon',
'Leonardsville',
'Levittown',
'Lewis',
'Lewisboro',
'Lewiston',
'Lexington',
'Liberty',
'Lily Dale',
'Lima',
'Limerick',
'Limestone',
'Lincolndale',
'Lindenhurst',
'Lindley',
'Linwood',
'Lisbon',
'Lisle',
'Little Falls',
'Little Genesee',
'Little Neck',
'Little Valley',
'Little York',
'Liverpool',
'Livingston',
'Livingston Manor',
'Livonia',
'Livonia Center',
'Loch Sheldrake',
'Locke',
'Lockport',
'Lockwood',
'Locust Valley',
'Lodi',
'Long Beach',
'Long Eddy',
'Long Island City',
'Long Lake',
'Lorraine',
'Lowman',
'Lowville',
'Lycoming',
'Lynbrook',
'Lyndonville',
'Lyon Mountain',
'Lyons',
'Lyons Falls',
'Macedon',
'Machias',
'Madison',
'Madrid',
'Mahopac',
'Mahopac Falls',
'Maine',
'Malden Bridge',
'Malden On Hudson',
'Mallory',
'Malone',
'Malverne',
'Mamaroneck',
'Manchester',
'Manhasset',
'Manlius',
'Mannsville',
'Manorville',
'Maple Springs',
'Maple View',
'Maplecrest',
'Marathon',
'Marcellus',
'Marcy',
'Margaretville',
'Marietta',
'Marilla',
'Marion',
'Marlboro',
'Martinsburg',
'Martville',
'Maryknoll',
'Maryland',
'Masonville',
'Maspeth',
'Massapequa',
'Massapequa Park',
'Massena',
'Mastic',
'Mastic Beach',
'Mattituck',
'Maybrook',
'Mayfield',
'Mayville',
'McConnellsville',
'McDonough',
'McGraw',
'McLean',
'Mechanicville',
'Mecklenburg',
'Medford',
'Medina',
'Medusa',
'Mellenville',
'Melrose',
'Melville',
'Memphis',
'Mendon',
'Meridale',
'Meridian',
'Merrick',
'Mexico',
'Mid Hudson',
'Mid Island',
'Middle Falls',
'Middle Granville',
'Middle Grove',
'Middle Island',
'Middle Village',
'Middleburgh',
'Middleport',
'Middlesex',
'Middletown',
'Middleville',
'Milford',
'Mill Neck',
'Millbrook',
'Miller Place',
'Millerton',
'Millport',
'Millwood',
'Milton',
'Mineola',
'Minerva',
'Minetto',
'Mineville',
'Minoa',
'Model City',
'Modena',
'Mohawk',
'Mohegan Lake',
'Moira',
'Mongaup Valley',
'Monroe',
'Monsey',
'Montauk',
'Montezuma',
'Montgomery',
'Monticello',
'Montour Falls',
'Montrose',
'Mooers',
'Mooers Forks',
'Moravia',
'Moriah',
'Moriah Center',
'Moriches',
'Morris',
'Morrisonville',
'Morristown',
'Morrisville',
'Morton',
'Mottville',
'Mount Kisco',
'Mount Marion',
'Mount Morris',
'Mount Sinai',
'Mount Tremper',
'Mount Upton',
'Mount Vernon',
'Mount Vision',
'Mountain Dale',
'Mountainville',
'Mumford',
'Munnsville',
'Nanuet',
'Napanoch',
'Naples',
'Narrowsburg',
'Nassau',
'Natural Bridge',
'Nedrow',
'Nelliston',
'Nesconset',
'Neversink',
'New Baltimore',
'New Berlin',
'New City',
'New Hampton',
'New Hartford',
'New Haven',
'New Hyde Park',
'New Kingston',
'New Lebanon',
'New Lisbon',
'New Milford',
'New Paltz',
'New Rochelle',
'New Russia',
'New Suffolk',
'New Windsor',
'New Woodstock',
'New York',
'New York Mills',
'Newark',
'Newark Valley',
'Newburgh',
'Newcomb',
'Newfane',
'Newfield',
'Newport',
'Newton Falls',
'Newtonville',
'Niagara Falls',
'Niagara University',
'Nichols',
'Nicholville',
'Nineveh',
'Niobe',
'Niverville',
'Norfolk',
'North Babylon',
'North Bangor',
'North Bay',
'North Blenheim',
'North Boston',
'North Branch',
'North Brookfield',
'North Chatham',
'North Chili',
'North Collins',
'North Creek',
'North Evans',
'North Granville',
'North Greece',
'North Hoosick',
'North Hudson',
'North Java',
'North Lawrence',
'North Norwich',
'North Pitcher',
'North River',
'North Rose',
'North Salem',
'North Tonawanda',
'Northport',
'Northville',
'Norwich',
'Norwood',
'Nunda',
'Nyack',
'Oak Hill',
'Oakdale',
'Oakfield',
'Oakland Gardens',
'Oaks Corners',
'Obernburg',
'Ocean Beach',
'Oceanside',
'Odessa',
'Ogdensburg',
'Olcott',
'Old Bethpage',
'Old Chatham',
'Old Forge',
'Old Westbury',
'Olean',
'Olivebridge',
'Olmstedville',
'Oneida',
'Oneonta',
'Ontario',
'Ontario Center',
'Orangeburg',
'Orchard Park',
'Orient',
'Oriskany',
'Oriskany Falls',
'Orwell',
'Ossining',
'Oswegatchie',
'Oswego',
'Otego',
'Otisville',
'Otto',
'Ouaquaga',
'Ovid',
'Owego',
'Owls Head',
'Oxbow',
'Oxford',
'Oyster Bay',
'Ozone Park',
'Painted Post',
'Palatine Bridge',
'Palenville',
'Palisades',
'Palmyra',
'Panama',
'Paradox',
'Parish',
'Parishville',
'Parksville',
'Patchogue',
'Patterson',
'Pattersonville',
'Paul Smiths',
'Pavilion',
'Pawling',
'Pearl River',
'Peconic',
'Peekskill',
'Pelham',
'Penfield',
'Penn Yan',
'Pennellville',
'Perkinsville',
'Perry',
'Perrysburg',
'Peru',
'Peterboro',
'Petersburg',
'Pharsalia',
'Phelps',
'Philadelphia',
'Phillipsport',
'Philmont',
'Phoenicia',
'Phoenix',
'Piercefield',
'Piermont',
'Pierrepont Manor',
'Piffard',
'Pike',
'Pine Bush',
'Pine City',
'Pine Hill',
'Pine Island',
'Pine Plains',
'Pine Valley',
'Piseco',
'Pitcher',
'Pittsford',
'Plainview',
'Plainville',
'Plattekill',
'Plattsburgh',
'Pleasant Valley',
'Pleasantville',
'Plessis',
'Plymouth',
'Poestenkill',
'Point Lookout',
'Poland',
'Pomona',
'Pompey',
'Pond Eddy',
'Poplar Ridge',
'Port Byron',
'Port Chester',
'Port Crane',
'Port Ewen',
'Port Gibson',
'Port Henry',
'Port Jefferson',
'Port Jefferson Station',
'Port Jervis',
'Port Kent',
'Port Leyden',
'Port Washington',
'Portage',
'Portageville',
'Porter Corners',
'Portland',
'Portlandville',
'Portville',
'Potsdam',
'Pottersville',
'Poughkeepsie',
'Poughquag',
'Pound Ridge',
'Prattsburgh',
'Prattsville',
'Preble',
'Preston Hollow',
'Prospect',
'Pulaski',
'Pulteney',
'Pultneyville',
'Purchase',
'Purdys',
'Purling',
'Putnam Station',
'Putnam Valley',
'Pyrites',
'Quaker Street',
'Queens Village',
'Queensbury',
'Quogue',
'Rainbow Lake',
'Randolph',
'Ransomville',
'Raquette Lake',
'Ravena',
'Ray Brook',
'Raymondville',
'Reading Center',
'Red Creek',
'Red Hook',
'Redfield',
'Redford',
'Redwood',
'Rego Park',
'Remsen',
'Remsenburg',
'Rensselaer',
'Rensselaer Falls',
'Rensselaerville',
'Retsof',
'Rexford',
'Rexville',
'Rhinebeck',
'Rhinecliff',
'Richburg',
'Richfield Springs',
'Richford',
'Richland',
'Richmond Hill',
'Richmondville',
'Richville',
'Ridge',
'Ridgewood',
'Rifton',
'Riparius',
'Ripley',
'Riverhead',
'Rochester',
'Rock City Falls',
'Rock Hill',
'Rock Stream',
'Rock Tavern',
'Rockaway Park',
'Rockville Centre',
'Rocky Point',
'Rodman',
'Rome',
'Romulus',
'Ronkonkoma',
'Roosevelt',
'Rooseveltown',
'Roscoe',
'Rose',
'Roseboom',
'Rosedale',
'Rosendale',
'Roslyn',
'Roslyn Heights',
'Rossburg',
'Rotterdam Junction',
'Round Lake',
'Round Top',
'Rouses Point',
'Roxbury',
'Ruby',
'Rush',
'Rushford',
'Rushville',
'Russell',
'Rye',
'Sabael',
'Sackets Harbor',
'Sag Harbor',
'Sagaponack',
'Saint Albans',
'Saint Bonaventure',
'Saint James',
'Saint Johnsville',
'Saint Regis Falls',
'Salamanca',
'Salem',
'Salisbury Center',
'Salisbury Mills',
'Salt Point',
'Sanborn',
'Sand Lake',
'Sandusky',
'Sandy Creek',
'Sangerfield',
'Saranac',
'Saranac Lake',
'Saratoga Springs',
'Sardinia',
'Saugerties',
'Sauquoit',
'Savannah',
'Savona',
'Sayville',
'Scarsdale',
'Schaghticoke',
'Schenectady',
'Schenevus',
'Schodack Landing',
'Schoharie',
'Schroon Lake',
'Schuyler Falls',
'Schuyler Lake',
'Schuylerville',
'Scio',
'Scipio Center',
'Scottsburg',
'Scottsville',
'Sea Cliff',
'Seaford',
'Selden',
'Selkirk',
'Seneca Castle',
'Seneca Falls',
'Severance',
'Shandaken',
'Sharon Springs',
'Shelter Island',
'Shelter Island Heights',
'Shenorock',
'Sherburne',
'Sheridan',
'Sherman',
'Sherrill',
'Shinhopple',
'Shirley',
'Shokan',
'Shoreham',
'Shortsville',
'Shrub Oak',
'Shushan',
'Sidney',
'Sidney Center',
'Silver Bay',
'Silver Creek',
'Silver Lake',
'Silver Springs',
'Sinclairville',
'Skaneateles',
'Skaneateles Falls',
'Slate Hill',
'Slaterville Springs',
'Slingerlands',
'Sloansville',
'Sloatsburg',
'Smallwood',
'Smithboro',
'Smithtown',
'Smithville Flats',
'Smyrna',
'Sodus',
'Sodus Point',
'Solsville',
'Somers',
'Sonyea',
'Sound Beach',
'South Bethlehem',
'South Butler',
'South Byron',
'South Cairo',
'South Colton',
'South Dayton',
'South Fallsburg',
'South Glens Falls',
'South Jamesport',
'South Kortright',
'South Lima',
'South New Berlin',
'South Otselic',
'South Ozone Park',
'South Plymouth',
'South Richmond Hill',
'South Rutland',
'South Salem',
'South Schodack',
'South Wales',
'Southampton',
'Southfields',
'Southold',
'Sparkill',
'Sparrow Bush',
'Speculator',
'Spencer',
'Spencerport',
'Spencertown',
'Speonk',
'Sprakers',
'Spring Brook',
'Spring Glen',
'Spring Valley',
'Springfield Center',
'Springfield Gardens',
'Springville',
'Springwater',
'Staatsburg',
'Stafford',
'Stamford',
'Stanfordville',
'Stanley',
'Star Lake',
'Staten Island',
'Steamburg',
'Stella Niagara',
'Stephentown',
'Sterling',
'Sterling Forest',
'Stillwater',
'Stittville',
'Stockton',
'Stone Ridge',
'Stony Brook',
'Stony Creek',
'Stony Point',
'Stormville',
'Stottville',
'Stow',
'Stratford',
'Strykersville',
'Stuyvesant',
'Stuyvesant Falls',
'Suffern',
'Sugar Loaf',
'Summit',
'Summitville',
'Sundown',
'Sunnyside',
'Surprise',
'Swain',
'Swan Lake',
'Sylvan Beach',
'Syosset',
'Syracuse',
'Taberg',
'Tallman',
'Tannersville',
'Tappan',
'Tarrytown',
'Thendara',
'Theresa',
'Thiells',
'Thompson Ridge',
'Thompsonville',
'Thornwood',
'Thousand Island Park',
'Three Mile Bay',
'Ticonderoga',
'Tillson',
'Tioga Center',
'Tivoli',
'Tomkins Cove',
'Tonawanda',
'Treadwell',
'Tribes Hill',
'Troupsburg',
'Trout Creek',
'Troy',
'Trumansburg',
'Truxton',
'Tuckahoe',
'Tully',
'Tunnel',
'Tupper Lake',
'Turin',
'Tuxedo Park',
'Tyrone',
'Ulster Park',
'Unadilla',
'Union Hill',
'Union Springs',
'Uniondale',
'Unionville',
'Upper Jay',
'Upton',
'Utica',
'Vails Gate',
'Valatie',
'Valhalla',
'Valley Cottage',
'Valley Falls',
'Valley Stream',
'Van Buren Point',
'Van Etten',
'Van Hornesville',
'Varysburg',
'Verbank',
'Vermontville',
'Vernon',
'Vernon Center',
'Verona',
'Verona Beach',
'Verplanck',
'Versailles',
'Vestal',
'Victor',
'Victory Mills',
'Voorheesville',
'Waccabuc',
'Waddington',
'Wading River',
'Wainscott',
'Walden',
'Wales Center',
'Walker Valley',
'Wallkill',
'Walton',
'Walworth',
'Wampsville',
'Wanakena',
'Wantagh',
'Wappingers Falls',
'Warners',
'Warnerville',
'Warrensburg',
'Warsaw',
'Warwick',
'Washington Mills',
'Washingtonville',
'Wassaic',
'Water Mill',
'Waterford',
'Waterloo',
'Waterport',
'Watertown',
'Waterville',
'Watervliet',
'Watkins Glen',
'Waverly',
'Wawarsing',
'Wayland',
'Wayne',
'Webb',
'Webster',
'Weedsport',
'Wellesley Island',
'Wells',
'Wells Bridge',
'Wellsburg',
'Wellsville',
'West Babylon',
'West Bloomfield',
'West Burlington',
'West Camp',
'West Chazy',
'West Clarksville',
'West Copake',
'West Coxsackie',
'West Davenport',
'West Eaton',
'West Edmeston',
'West Falls',
'West Fulton',
'West Harrison',
'West Haverstraw',
'West Hempstead',
'West Henrietta',
'West Hurley',
'West Islip',
'West Kill',
'West Lebanon',
'West Leyden',
'West Monroe',
'West Nyack',
'West Oneonta',
'West Park',
'West Point',
'West Sand Lake',
'West Sayville',
'West Shokan',
'West Stockholm',
'West Valley',
'West Winfield',
'Westbrookville',
'Westbury',
'Westdale',
'Westerlo',
'Westernville',
'Westfield',
'Westford',
'Westhampton',
'Westhampton Beach',
'Westmoreland',
'Westons Mills',
'Westport',
'Westtown',
'Wevertown',
'Whippleville',
'White Lake',
'White Plains',
'White Sulphur Springs',
'Whitehall',
'Whitesboro',
'Whitestone',
'Whitestown',
'Whitesville',
'Whitney Point',
'Willard',
'Willet',
'Williamson',
'Williamstown',
'Williston Park',
'Willow',
'Willsboro',
'Willseyville',
'Wilmington',
'Wilson',
'Wilton',
'Windham',
'Windsor',
'Wingdale',
'Winthrop',
'Witherbee',
'Wolcott',
'Woodbourne',
'Woodbury',
'Woodgate',
'Woodhaven',
'Woodhull',
'Woodmere',
'Woodridge',
'Woodside',
'Woodstock',
'Worcester',
'Wurtsboro',
'Wyandanch',
'Wykagyl',
'Wynantskill',
'Wyoming',
'Yaphank',
'Yonkers',
'York',
'Yorkshire',
'Yorktown Heights',
'Yorkville',
'Youngstown',
'Youngsville',
'Yulan'
]
|
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from scipy.interpolate import interp1d
from pyseir.load_data import load_public_implementations_data
from pyseir.inference import fit_results
# Fig 4 of Imperial college.
# https://www.imperial.ac.uk/media/imperial-college/medicine/sph/ide/gida-fellowships/Imperial-College-COVID19-Europe-estimates-and-NPI-impact-30-03-2020.pdf
# These are intended to act independently, as shown by a multivariate fit
# from Imp. College. The exception is lockdown which supersedes everything.
# Each value is the contact-rate reduction attributed to one measure; active
# measures are subtracted from 1 in generate_empirical_distancing_policy.
distancing_measure_suppression = {
    'stay_at_home': .48,
    '50_gatherings': .05,
    '500_gatherings': .02,  # Made this one up since not examined. Assume it isn't very effective at county level, esp. relative to 50 gatherings
    'self_isolation': 0.05,  # This one is not included in the policies dataset, but is in the imperial college paper. Keep it around for now.
    'public_schools': .18,  # Total social distancing was about
    'entertainment_gym': 0.02,
    'restaurant_dine-in': 0.03,
    'federal_guidelines': 0.03  # Making this up as well. Probably not very effective relative to stay at home...
}
def generate_triggered_suppression_model(t_list, lockdown_days, open_days, reduction=0.25, start_on=0):
    """
    Build a contact-rate model that alternates between an "open" state
    (contact rate 1) and a "lockdown" state (contact rate ``reduction``).

    Parameters
    ----------
    t_list: array-like
        Times at which to evaluate the policy.
    lockdown_days: int
        Number of consecutive days in the reduced-contact state.
    open_days: int
        Number of consecutive days at the full contact rate.
    reduction: float
        Contact-rate multiplier applied during lockdown.
    start_on: int
        Start the lockdown fluctuation after this many days; before that
        the contact rate is 1.

    Returns
    -------
    suppression_model: callable
        suppression_model(t) returns the current suppression level at time t.
    """
    if lockdown_days == 0:
        # Never locked down: contact rate is 1 everywhere.
        values = np.ones(len(t_list))
    elif open_days == 0:
        # Locked down permanently once the policy starts.
        values = np.ones(len(t_list)) * reduction
    else:
        # Walk a two-state machine along t_list, flipping phase whenever the
        # scheduled switch time is reached.
        values = []
        phase = 'lockdown'
        next_switch = start_on + lockdown_days
        for t in t_list:
            if t >= next_switch:
                if phase == 'open':
                    phase = 'lockdown'
                    next_switch += lockdown_days
                else:
                    phase = 'open'
                    next_switch += open_days
            values.append(1 if phase == 'open' else reduction)
        values = np.array(values)
    # No suppression before the policy kicks in.
    values[t_list < start_on] = 1
    return interp1d(t_list, values, fill_value='extrapolate')
def generate_empirical_distancing_policy(t_list, fips, future_suppression):
    """
    Produce a suppression policy based on Imperial College estimates of social
    distancing programs combined with county level datasets about their
    implementation.

    Parameters
    ----------
    t_list: array-like
        List of times to interpolate over.
    fips: str
        County fips to lookup interventions against.
    future_suppression: float
        The suppression level to apply in an ongoing basis after today, and
        going backward as the lockdown / stay-at-home efficacy.

    Returns
    -------
    suppression_model: callable
        suppression_model(t) returns the current suppression model at time t.
    """
    t0 = fit_results.load_t0(fips)
    rho = []
    # Check for fips that don't match.
    public_implementations = load_public_implementations_data().set_index('fips')
    # Not all counties present in this dataset.
    if fips not in public_implementations.index:
        # Then assume 1.0 until today and then future_suppression going forward.
        for t_step in t_list:
            t_actual = t0 + timedelta(days=t_step)
            # Use utcnow() consistently with the branch below (the original
            # mixed local now() here with utcnow() there).
            if t_actual <= datetime.utcnow():
                rho.append(1.0)
            else:
                rho.append(future_suppression)
    else:
        policies = public_implementations.loc[fips].to_dict()
        for t_step in t_list:
            t_actual = t0 + timedelta(days=t_step)
            rho_this_t = 1
            # If this is a future date, assume lockdown continues.
            if t_actual > datetime.utcnow():
                rho.append(future_suppression)
                continue
            # If the policy was enacted on this timestep then activate it in
            # addition to others. These measures are additive unless lockdown is
            # instituted.
            for independent_measure in ['public_schools',
                                        'entertainment_gym',
                                        'restaurant_dine-in',
                                        'federal_guidelines']:
                if not pd.isnull(policies[independent_measure]) and t_actual > \
                        policies[independent_measure]:
                    rho_this_t -= distancing_measure_suppression[
                        independent_measure]
            # Only take the max of these, since 500 doesn't matter if 50 is enacted.
            if not pd.isnull(policies['50_gatherings']) and t_actual > policies['50_gatherings']:
                rho_this_t -= distancing_measure_suppression['50_gatherings']
            elif not pd.isnull(policies['500_gatherings']) and t_actual > policies['500_gatherings']:
                rho_this_t -= distancing_measure_suppression['500_gatherings']
            # If lockdown, then we don't care about any others, just set to
            # future suppression.
            # BUGFIX: this previously tested `pd.isnull(...)` (missing `not`),
            # which skipped counties that do have a stay-at-home order and
            # compared a datetime against NaT for those that don't.
            if not pd.isnull(policies['stay_at_home']) and t_actual > policies['stay_at_home']:
                rho_this_t = future_suppression
            rho.append(rho_this_t)
    return interp1d(t_list, rho, fill_value='extrapolate')
def piecewise_parametric_policy(x, t_list):
    """
    Build a piecewise-constant suppression policy over the span of t_list.

    The span is divided into len(x) - 1 intervals whose relative widths are
    proportional to (i + 1) ** x[0]; interval i takes the constant level
    x[1 + i].

    Parameters
    ----------
    x: array(float)
        x[0]: split power law exponent. The splits are generated from the
        relative proportions of t ** x[0], so x[0] = 0 gives evenly
        spaced intervals.
        x[1:]: suppression levels assigned to the successive intervals.
    t_list: array-like
        Times spanning the policy period.

    Returns
    -------
    policy: callable
        Interpolator for the suppression policy.
    """
    exponent, levels = x[0], x[1:]
    total_days = int(np.max(t_list) - np.min(t_list))
    widths = np.array([(i + 1) ** exponent for i in range(len(levels))])
    # Normalize the widths to the full period and turn them into cumulative
    # interval end points.
    boundaries = (widths / widths.sum() * total_days).cumsum()
    boundaries[-1] += 0.001  # Guards against floating point errors at the end.
    # For each time pick the level of the first interval whose end point has
    # not yet been passed.
    per_t_levels = [levels[np.argwhere(t <= boundaries)[0][0]] for t in t_list]
    return interp1d(t_list, per_t_levels, fill_value='extrapolate')
def fourier_parametric_policy(x, t_list, suppression_bounds=(0.5, 1.5)):
    """
    Build a suppression policy from the first N Fourier coefficients.

    The coefficients fill the low-frequency end of a spectrum of length
    len(t_list); the inverse FFT (real part plus imaginary part) gives the
    time-domain policy, clipped to suppression_bounds.

    Parameters
    ----------
    x: array(float)
        First N Fourier coefficients. The DC term is scaled so the mean is
        levelled at 0.75, i.e. a0 = (3 * period / 4) * x[0].
    t_list: array-like
        Times spanning the policy period.
    suppression_bounds: tuple(float)
        (lower, upper) clip limits applied to the policy values.

    Returns
    -------
    policy: callable
        Interpolator for the suppression policy.
    """
    lower, upper = suppression_bounds
    spectrum = np.zeros(len(t_list))
    period = t_list.max() - t_list.min()
    # DC component levels the mean at 0.75 * period * x[0].
    spectrum[0] = (3 * period / 4) * x[0]
    spectrum[1:len(x)] = x[1:]
    # Single inverse transform (the original computed it twice).
    transformed = np.fft.ifft(spectrum)
    signal = transformed.real + transformed.imag
    return interp1d(t_list, signal.clip(min=lower, max=upper),
                    fill_value='extrapolate')
|
# encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix
from NiaPy.algorithms.basic import ParticleSwarmAlgorithm
from NiaPy.task.task import StoppingTask, OptimizationType
from NiaPy.benchmarks import Sphere
from numpy import random as rand, apply_along_axis
def MyInit(task, NP, rnd=rand, **kwargs):
    """Custom population initializer.

    Draws NP individuals as 0.2 + U[0, 1) * task.bRange and evaluates each
    row with task.eval. Returns (population, fitness).
    """
    population = 0.2 + rnd.rand(NP, task.D) * task.bRange
    fitness = apply_along_axis(task.eval, 1, population)
    return population, fitness
# Run the Particle Swarm Algorithm with the custom init function for 5
# independent runs, printing the best result of each run.
for run in range(5):
    task = StoppingTask(D=10, nFES=1000, benchmark=Sphere())
    pso = ParticleSwarmAlgorithm(NP=10, C1=2.0, C2=2.0, w=0.7, vMin=-4,
                                 vMax=4, InitPopFunc=MyInit)
    print(pso.run(task=task))
|
# -*- coding: utf-8 -*-
from .base import BaseThumbnailEngine
class WandEngine(BaseThumbnailEngine):
    """
    Image engine for wand (the ImageMagick bindings).
    """
    def __init__(self):
        super(WandEngine, self).__init__()
        # Imported lazily so this module can be imported without wand
        # installed; the class is kept on the instance for later use.
        from wand.image import Image
        self._Image = Image
    def engine_image_size(self, image):
        """Return the image size as reported by wand (a (width, height) pair)."""
        return image.size
    def engine_load_image(self, original):
        """Create a wand Image from the raw bytes of *original*.

        NOTE(review): the handle returned by ``original.open()`` is never
        closed here — confirm whether the storage backend requires explicit
        closing.
        """
        return self._Image(blob=original.open().read())
    def engine_scale(self, image, width, height):
        """Resize the image in place to width x height and return it."""
        image.resize(width, height)
        return image
    def engine_cleanup(self, original):
        """No-op: this engine performs no per-image cleanup."""
        pass
    def engine_crop(self, image, size, crop, options):
        """Crop the image to *size* at offset *crop* = (x, y) and return it."""
        x, y = crop
        width, height = size
        image.crop(x, y, width=width, height=height)
        return image
    def engine_raw_data(self, image, options):
        """Serialize the image to bytes with the requested quality and format."""
        image.compression_quality = options['quality']
        image.format = self.get_format(image, options)
        return image.make_blob()
    def engine_colormode(self, image, colormode):
        """Set the wand image type for 'RGB' or 'GRAY' and return the image.

        Any other colormode value leaves the image unchanged.
        """
        if colormode == 'RGB':
            image.type = 'truecolor'
        elif colormode == 'GRAY':
            image.type = 'grayscale'
        return image
    def engine_get_format(self, image):
        """Return the image format reported by wand."""
        return image.format
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy.units import Quantity
from astropy.io import fits
__all__ = ["Energy", "EnergyBounds"]
log = logging.getLogger(__name__)
class Energy(Quantity):
    """Energy quantity scalar or array.
    This is a `~astropy.units.Quantity` sub-class that adds convenience methods
    to handle common tasks for energy bin center arrays, like FITS I/O or generating
    equal-log-spaced grids of energies.
    See :ref:`energy_handling_gammapy` for further information.
    Parameters
    ----------
    energy : `~numpy.array`, scalar, `~astropy.units.Quantity`
        Energy
    unit : `~astropy.units.UnitBase`, str, optional
        The unit of the value specified for the energy.  This may be
        any string that `~astropy.units.Unit` understands, but it is
        better to give an actual unit object.
    dtype : `~numpy.dtype`, optional
        See `~astropy.units.Quantity`.
    copy : bool, optional
        See `~astropy.units.Quantity`.
    """
    def __new__(cls, energy, unit=None, dtype=None, copy=True):
        # Techniques to subclass Quantity taken from astropy.coordinates.Angle
        # see: http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
        if isinstance(energy, str):
            # Accept strings like "1 TeV": split into a value and a unit.
            val, unit = energy.split()
            energy = float(val)
        # This is a pylint error false positive
        # See https://github.com/PyCQA/pylint/issues/2335#issuecomment-415055075
        self = super().__new__(
            cls,
            energy,
            unit,  # pylint:disable=redundant-keyword-arg
            dtype=dtype,
            copy=copy,
        )
        # Reject any unit that is not convertible to an energy.
        if not self.unit.is_equivalent("eV"):
            raise ValueError(
                "Given unit {} is not an" " energy".format(self.unit.to_string())
            )
        return self
    def __array_finalize__(self, obj):
        # Nothing extra to propagate beyond what Quantity already handles.
        super().__array_finalize__(obj)
    def __quantity_subclass__(self, unit):
        # Arithmetic results remain Energy only while the unit is still an
        # energy; otherwise fall back to the generic Quantity subclass.
        if unit.is_equivalent("eV"):
            return Energy, True
        else:
            return super().__quantity_subclass__(unit)[0], False
    @property
    def nbins(self):
        """The number of bins."""
        return self.size
    @property
    def range(self):
        """The covered energy range (tuple)."""
        # Slicing with step size-1 picks exactly the first and last element.
        return self[0 : self.size : self.size - 1]
    @classmethod
    def equal_log_spacing(cls, emin, emax, nbins, unit=None, per_decade=False):
        """Create Energy with equal log-spacing (`~gammapy.utils.energy.Energy`).
        If no unit is given, it will be taken from emax.
        Parameters
        ----------
        emin : `~astropy.units.Quantity`, float
            Lowest energy bin
        emax : `~astropy.units.Quantity`, float
            Highest energy bin
        nbins : int
            Number of bins
        unit : `~astropy.units.UnitBase`, str
            Energy unit
        per_decade : bool
            Whether nbins is per decade.
        """
        if unit is not None:
            emin = Energy(emin, unit)
            emax = Energy(emax, unit)
        else:
            emin = Energy(emin)
            emax = Energy(emax)
            unit = emax.unit
            emin = emin.to(unit)
        x_min, x_max = np.log10([emin.value, emax.value])
        if per_decade:
            # NOTE(review): this leaves nbins as a float; recent numpy
            # versions require an integer `num` for logspace — confirm the
            # supported numpy range.
            nbins = (x_max - x_min) * nbins
        energy = np.logspace(x_min, x_max, nbins)
        return cls(energy, unit, copy=False)
    @classmethod
    def from_fits(cls, hdu, unit=None):
        """Read ENERGIES fits extension (`~gammapy.utils.energy.Energy`).
        Parameters
        ----------
        hdu : `~astropy.io.fits.BinTableHDU`
            ``ENERGIES`` extensions.
        unit : `~astropy.units.UnitBase`, str, None
            Energy unit
        """
        header = hdu.header
        fitsunit = header.get("TUNIT1")
        if fitsunit is None:
            if unit is not None:
                # Header has no unit: fall back to the caller-supplied one.
                log.warning(
                    "No unit found in the FITS header." " Setting it to {}".format(unit)
                )
                fitsunit = unit
            else:
                raise ValueError(
                    "No unit found in the FITS header." " Please specifiy a unit"
                )
        energy = cls(hdu.data["Energy"], fitsunit)
        # NOTE(review): when the header does carry a unit and `unit` is None,
        # this calls .to(None) — confirm the intended behavior in that case.
        return energy.to(unit)
    def to_fits(self):
        """Write ENERGIES fits extension.
        Returns
        -------
        hdu : `~astropy.io.fits.BinTableHDU`
            ENERGIES fits extension
        """
        col1 = fits.Column(name="Energy", format="D", array=self.value)
        cols = fits.ColDefs([col1])
        hdu = fits.BinTableHDU.from_columns(cols)
        hdu.name = "ENERGIES"
        # Record the unit in the standard FITS TUNITn keyword.
        hdu.header["TUNIT1"] = "{}".format(self.unit.to_string("fits"))
        return hdu
class EnergyBounds(Energy):
    """EnergyBounds array.
    This is a `~gammapy.utils.energy.Energy` sub-class that adds convenience
    methods to handle common tasks for energy bin edges arrays, like FITS I/O or
    generating arrays of bin centers.
    See :ref:`energy_handling_gammapy` for further information.
    Parameters
    ----------
    energy : `~numpy.array`, scalar, `~astropy.units.Quantity`
        EnergyBounds
    unit : `~astropy.units.UnitBase`, str
        The unit of the values specified for the energy.  This may be any
        string that `~astropy.units.Unit` understands, but it is better to
        give an actual unit object.
    """
    @property
    def nbins(self):
        """The number of bins (one less than the number of edges)."""
        return self.size - 1
    @property
    def log_centers(self):
        """Log centers of the energy bounds (geometric mean of adjacent edges)."""
        center = np.sqrt(self[:-1] * self[1:])
        return center.view(Energy)
    @property
    def upper_bounds(self):
        """Upper energy bin edges."""
        return self[1:]
    @property
    def lower_bounds(self):
        """Lower energy bin edges."""
        return self[:-1]
    @property
    def boundaries(self):
        """Energy range (first and last edge)."""
        return self[[0, -1]]
    @property
    def bands(self):
        """Width of the energy bins."""
        upper = self.upper_bounds
        lower = self.lower_bounds
        return upper - lower
    @classmethod
    def from_lower_and_upper_bounds(cls, lower, upper, unit=None):
        """EnergyBounds from lower and upper bounds (`~gammapy.utils.energy.EnergyBounds`).
        If no unit is given, it will be taken from upper.
        Parameters
        ----------
        lower,upper : `~astropy.units.Quantity`, float
            Lowest and highest energy bin
        unit : `~astropy.units.UnitBase`, str, None
            Energy units
        """
        # np.append renders Quantities dimensionless
        # http://docs.astropy.org/en/latest/known_issues.html#quantity-issues
        if unit is None:
            # NOTE(review): this requires `upper` to be a Quantity when unit
            # is None — a plain float/array would raise AttributeError.
            unit = upper.unit
        lower = cls(lower, unit)
        upper = cls(upper, unit)
        # Contiguous edges: all lower edges plus the final upper edge.
        energy = np.append(lower.value, upper.value[-1])
        return cls(energy, unit)
    @classmethod
    def equal_log_spacing(cls, emin, emax, nbins, unit=None):
        """EnergyBounds with equal log-spacing (`~gammapy.utils.energy.EnergyBounds`).
        If no unit is given, it will be taken from emax.
        Parameters
        ----------
        emin : `~astropy.units.Quantity`, float
            Lowest energy bin
        emax : `~astropy.units.Quantity`, float
            Highest energy bin
        nbins : int
            Number of bins
        unit : `~astropy.units.UnitBase`, str, None
            Energy unit
        """
        # nbins bins require nbins + 1 edges.
        return super().equal_log_spacing(emin, emax, nbins + 1, unit)
    @classmethod
    def from_ebounds(cls, hdu):
        """Read EBOUNDS fits extension (`~gammapy.utils.energy.EnergyBounds`).
        Parameters
        ----------
        hdu : `~astropy.io.fits.BinTableHDU`
            ``EBOUNDS`` extensions.
        """
        if hdu.name != "EBOUNDS":
            log.warning(
                "This does not seem like an EBOUNDS extension. " "Are you sure?"
            )
        header = hdu.header
        unit = header.get("TUNIT2")
        low = hdu.data["E_MIN"]
        high = hdu.data["E_MAX"]
        return cls.from_lower_and_upper_bounds(low, high, unit)
    @classmethod
    def from_rmf_matrix(cls, hdu):
        """Read MATRIX fits extension (`~gammapy.utils.energy.EnergyBounds`).
        Parameters
        ----------
        hdu : `~astropy.io.fits.BinTableHDU`
            ``MATRIX`` extensions.
        """
        if hdu.name != "MATRIX":
            log.warning("This does not seem like a MATRIX extension. " "Are you sure?")
        header = hdu.header
        unit = header.get("TUNIT1")
        low = hdu.data["ENERG_LO"]
        high = hdu.data["ENERG_HI"]
        return cls.from_lower_and_upper_bounds(low, high, unit)
    def find_energy_bin(self, energy):
        """Find the bins that contain the specified energy values.
        Parameters
        ----------
        energy : `~gammapy.utils.energy.Energy`
            Array of energies to search for.
        Returns
        -------
        bin_index : `~numpy.ndarray`
            Indices of the energy bins containing the specified energies.
        """
        # check that the specified energy is within the boundaries
        if not self.contains(energy).all():
            ss_error = "Specified energy {}".format(energy)
            ss_error += " is outside the boundaries {}".format(self.boundaries)
            raise ValueError(ss_error)
        # Index of the first upper edge at or above each energy.
        bin_index = np.searchsorted(self.upper_bounds, energy)
        return bin_index
    def contains(self, energy):
        """Check if energy is contained in the boundaries.
        Note the comparison is strict: values exactly equal to the first or
        last edge are reported as not contained.
        Parameters
        ----------
        energy : `~gammapy.utils.energy.Energy`
            Array of energies to test
        """
        return (energy > self[0]) & (energy < self[-1])
    def to_dict(self):
        """Construct dict representing an energy range."""
        # Only a two-edge (single-bin) array is a plain range.
        if len(self) != 2:
            raise ValueError(
                "This is not an energy range. Nbins: {}".format(self.nbins)
            )
        d = dict(min=self[0].value, max=self[1].value, unit="{}".format(self.unit))
        return d
    @classmethod
    def from_dict(cls, d):
        """Read dict representing an energy range."""
        return cls((d["min"], d["max"]), d["unit"])
|
import pytest
from src.teamwork.client import Teamwork
from src.teamwork.exceptions import CREDENTIALS_MESSAGE, CredentialsError
def test_credential_raises():
    """Constructing a client without credentials must raise CredentialsError
    carrying the canonical credentials message."""
    with pytest.raises(CredentialsError) as excinfo:
        Teamwork()
    assert CREDENTIALS_MESSAGE == str(excinfo.value)
|
# A simple chat bot in Python.
# Written by Wannaphong Phatthiyaphaibun.
# https://python3.wannaphong.com/2015/07/ทำ-chat-bot-ง่าย-ๆ-ในภาษา-python.html
import random

# Hoisted out of the loop: recognized greetings and the replies we may
# answer them with (exact, case-sensitive matching as in the original).
GREETINGS = ['HI', 'Hello']
REPLIES = ['Hello :D', 'Hi', ':D']


def get_reply(text):
    """Return a random greeting reply for a known greeting, '?' otherwise."""
    if text in GREETINGS:
        return random.choice(REPLIES)
    return "?"


def main():
    """Read user input forever and print a reply for each line."""
    while True:
        print(get_reply(input("> ")))


# Guarded so importing this module no longer blocks on input().
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Dave Kruger.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from luxon import register
from luxon import router
from luxon import db
from luxon import js
from luxon.utils.pkg import EntryPoints
from luxon.helpers.api import raw_list, obj
from luxon.utils.unique import string_id
from luxon.utils.timezone import to_utc
from luxon.exceptions import ValidationError
from luxon.exceptions import HTTPConflict
from netrino.models.orders import netrino_order
@register.resources()
class Orders:
    """REST resource collection for product orders.

    Registers list/view/create/update routes for customers plus
    activate/deactivate routes for service administrators.
    """
    def __init__(self):
        # Customer-facing CRUD routes.
        router.add('GET', '/v1/orders', self.list,
                   tag='customer')
        router.add('GET', '/v1/order/{oid}', self.view,
                   tag='customer')
        router.add('POST', '/v1/order', self.create,
                   tag='customer')
        router.add(['PUT', 'PATCH'], '/v1/order/{oid}', self.update,
                   tag='customer')
        # Admin-only product activation routes.
        router.add('POST', '/v1/activate/product/{oid}', self.activate,
                   tag='services:admin')
        router.add('POST', '/v1/deactivate/product/{oid}', self.deactivate,
                   tag='services:admin')
    def _get_service(self, oid):
        """Look up the product for order *oid* and its optional entrypoint.

        Returns
        -------
        tuple
            (product_row, entrypoint_name, metadata_dict); the last two are
            None when the product has no registered entrypoint row.
        """
        sql_order = 'SELECT product_id FROM netrino_order WHERE id=?'
        sql_product = 'SELECT * FROM netrino_product WHERE id=?'
        sql = 'SELECT entrypoint,metadata FROM netrino_product_entrypoint ' \
              'WHERE product_id=?'
        with db() as conn:
            order = conn.execute(sql_order, oid).fetchone()
            pid = order['product_id']
            product = conn.execute(sql_product, pid).fetchone()
            result = conn.execute(sql, pid).fetchone()
            if result:
                try:
                    metadata = js.loads(result['metadata'])
                except TypeError:
                    # Stored metadata may be NULL; treat it as empty.
                    metadata = {}
                return product, result['entrypoint'], metadata
            return product, None, None
    def _get_orders(self, req):
        """Build and run the order-listing query for the request context.

        Tenant-scoped requests only see their own orders; requests without a
        tenant context (admin view) additionally join in the tenant name.
        """
        select = {'netrino_order.id': 'id',
                  'netrino_product.name': 'product_name',
                  'netrino_order.creation_time': 'creation_time',
                  'netrino_order.tenant_id': 'tenant_id',
                  'netrino_order.status':'status',
                  'netrino_order.short_id': 'short_id'
                  }
        s_from = ['netrino_order', 'netrino_product']
        where = {'netrino_order.product_id': 'netrino_product.id'}
        vals = []
        if req.context_tenant_id:
            # None marks a parameterized condition; its value is bound via
            # vals rather than interpolated as a column reference.
            where['tenant_id'] = None
            vals.append(req.context_tenant_id)
        else:
            select['infinitystone_tenant.name'] = 'tenant_name'
            s_from.append('infinitystone_tenant')
            where['infinitystone_tenant.id'] = 'netrino_order.tenant_id'
        # Assemble "SELECT col AS alias,... FROM ... WHERE ..." by hand.
        select = ['%s AS %s' % (k,select[k],) for k in select]
        select = 'SELECT ' + ','.join(select)
        select += ' FROM ' + ','.join(s_from)
        where_str = []
        for k in where:
            if where[k]:
                # Column-to-column join condition.
                where_str.append('%s=%s' % (k, where[k],))
            else:
                # Placeholder condition bound from vals.
                where_str.append('%s=?' % k)
        select += ' WHERE ' + ' AND '.join(where_str)
        with db() as conn:
            return conn.execute(select, vals).fetchall()
    def list(self, req, resp):
        """GET /v1/orders — list the orders visible to the requester."""
        orders = self._get_orders(req)
        return raw_list(req, orders)
    def create(self, req, resp):
        """POST /v1/order — create a new order with a random short id."""
        order = obj(req, netrino_order)
        order.update({'short_id': string_id(25)})
        try:
            order.commit()
        except ValidationError:
            # A short_id collision surfaces as a validation error; the client
            # is asked to retry (a new id is generated per attempt).
            raise HTTPConflict(title="Duplicate Order",
                               description="Please retry this request")
        return order
    def update(self, req, resp, oid):
        """PUT/PATCH /v1/order/{oid}.
        The following fields can be updated:
            status
            metadata
            payment_date
        Submitted metadata is merged into the stored metadata rather than
        replacing it. Returns the (possibly updated) order row.
        """
        sql = 'UPDATE netrino_order SET '
        vals = []
        fields = []
        o_sql = 'SELECT * FROM netrino_order WHERE id=?'
        with db() as conn:
            if 'metadata' in req.json:
                emd = conn.execute(o_sql, oid).fetchone()['metadata']
                if emd:
                    md = js.loads(emd)
                else:
                    md = {}
                md.update(req.json['metadata'])
                fields.append('metadata=?')
                vals.append(js.dumps(md))
            if 'status' in req.json:
                fields.append('status=?')
                vals.append(req.json['status'])
            if 'payment_date' in req.json:
                fields.append('payment_date=?')
                vals.append(to_utc(req.json['payment_date']))
            sql += ','.join(fields)
            sql += ' WHERE id=?'
            if vals:
                # Only touch the database when a recognized field was sent.
                conn.execute(sql, vals + [oid])
                conn.commit()
            order = conn.execute(o_sql, oid).fetchone()
        return order
    def view(self, req, resp, oid):
        """GET /v1/order/{oid} — return a single order."""
        return obj(req, netrino_order, sql_id=oid)
    def activate(self, req, resp, oid):
        """POST /v1/activate/product/{oid} — deploy the ordered product via
        its registered 'netrino.product.tasks' entrypoint, if any."""
        product, ep, metadata = self._get_service(oid)
        result = {'reason': 'Nothing to do, no "netrino.product.tasks" '
                            'entrypoint found'}
        if ep:
            ep = EntryPoints('netrino.product.tasks')[ep]
            ep_obj = ep(req, metadata, oid, product)
            result = ep_obj.deploy()
        return result
    def deactivate(self, req, resp, oid):
        """POST /v1/deactivate/product/{oid} — deactivate the ordered product
        via its registered 'netrino.product.tasks' entrypoint, if any."""
        product, ep, metadata = self._get_service(oid)
        result = {'reason': 'Nothing to do, no "netrino.product.tasks" '
                            'entrypoint found'}
        if ep:
            ep = EntryPoints('netrino.product.tasks')[ep]
            ep_obj = ep(req, metadata, oid, product)
            result = ep_obj.deactivate()
        return result
|
import os
from sqlalchemy.ext.declarative import declarative_base
from fns.db.session import create_db_session
from fns.util.resutils import get_database_paths
# Prefer an explicitly configured database URL; fall back to the local default.
# `x or y` is equivalent to the previous `x if x else y` spelling (an unset or
# empty DATABASE_URL falls through either way) and reads the env var only once.
database_path = os.environ.get("DATABASE_URL") or get_database_paths()["LOCAL"]
engine, db_session = create_db_session(database_path)
Base = declarative_base()
# Enable `Model.query` style access on all declarative models.
Base.query = db_session.query_property()
def init_db():
    """(Re)create all database tables, dropping any existing data first."""
    # Imported here so the model classes register themselves on Base.metadata
    # before the schema is rebuilt.
    from fns.db.models import Article, Markov
    Base.metadata.drop_all(bind=engine)
    Base.metadata.create_all(bind=engine)
    # TODO: create fixtures (permission roles)
|
# -*- coding: utf-8 -*-
import os
import logging
from ProxyGeoDetector import Detector
from proxy_validator import config
from proxy_validator.chain import Handler, RChain
from proxy_validator.client import Client
# Probe URLs and option lists pulled from the application config.
Connection_Detect_Targets = config['CONNECTION_DETECT_TARGETS']
Proxy_Types = config['PROXY_TYPES']
Proxy_Type_Detect_Url = config['TYPE_DETECT_URL']
Anonymity_Detect_Url = config['ANONYMITY_DETECT_URL']
def validate_usability(proxy):
    """Return True when the proxy is UNUSABLE (this feeds the 'invalid' field).

    Probes the 'baidu' and 'google' targets through the proxy; the proxy
    counts as usable if at least one probe succeeds.

    Cleanup: the original counted failures with a dead `is_usable += 0`
    no-op; this version counts successes directly.
    """
    client = Client()
    client.set_proxies(proxy.proxy_str(), proxy.proxy_type[0])
    reachable = 0
    for site in ['baidu', 'google']:
        try:
            client.get(Connection_Detect_Targets[site])
        except Exception as e:
            # A failed probe simply does not count towards reachability.
            logging.info(e)
        else:
            reachable += 1
    # No probe site reachable at all -> mark the proxy invalid.
    return reachable == 0
def validate_connection(proxy):
    """Return the list of probe-site names reachable through *proxy*."""
    if proxy.invalid:
        return []
    client = Client()
    client.set_proxies(proxy.proxy_str(), proxy.proxy_type[0])
    reachable = []
    for name in Connection_Detect_Targets:
        try:
            client.get(Connection_Detect_Targets[name])
        except Exception as exc:
            # Unreachable sites are logged and skipped.
            logging.info(exc)
        else:
            reachable.append(name)
    return reachable
def validate_anonymity(proxy):
    """Return the anonymity classification reported by the detect service.

    Falls back to ['unknown'] when no detect URL is configured or the
    request through the proxy fails.
    """
    if proxy.invalid:
        return []
    client = Client()
    client.set_proxies(proxy.proxy_str(), proxy.proxy_type[0])
    anonymity = ['unknown']
    if Anonymity_Detect_Url:
        try:
            anonymity = client.get(Anonymity_Detect_Url).json()
        except Exception as exc:
            logging.info(exc)
    return anonymity
def validate_location(proxy):
    """Return a location label for the proxy's IP via the bundled GeoLite2 db."""
    if proxy.invalid: return []
    # The GeoLite2-City database ships next to this module.
    db_path = os.path.split(os.path.realpath(__file__))[0] + '/GeoLite2-City.mmdb'
    # FIXME: update geo detector default locale
    return Detector.open_reader(db_path)(proxy.ip_address, None).location_label()
def validate_proxy_type(proxy):
    """Return every protocol in Proxy_Types that works for *proxy*.

    Falls back to ['http'] when none of the candidates could be verified.
    """
    if proxy.invalid:
        return []
    client = Client()
    working = []
    for candidate in Proxy_Types:
        client.set_proxies(proxy.proxy_str(), candidate)
        try:
            client.get(Proxy_Type_Detect_Url)
        except Exception as exc:
            logging.info(exc)
        else:
            working.append(candidate)
    return working if working else ['http']
def update_check_at(proxy):
    """Return a fresh check timestamp, or -1 for an invalid proxy."""
    if proxy.invalid:
        return -1
    # Local import mirrors the original module layout.
    import time
    return time.time()
def save_or_remove(proxy):
    # Terminal chain step: delegates persistence entirely to the proxy
    # object itself (the keep-or-discard decision lives there).
    proxy.save_or_remove()
# Validation pipeline.  Each Handler pairs a field name with its validator;
# presumably the validator's return value is stored on the named proxy
# attribute (verify against the RChain/Handler implementation).  'invalid'
# runs early so the later validators can short-circuit on it, and the final
# 'end' step persists or discards the proxy.
validation_chain = RChain() \
    .append_handler(Handler('proxy_type', validate_proxy_type)) \
    .append_handler(Handler('invalid', validate_usability)) \
    .append_handler(Handler('connection', validate_connection)) \
    .append_handler(Handler('anonymity', validate_anonymity)) \
    .append_handler(Handler('location', validate_location)) \
    .append_handler(Handler('last_check_at', update_check_at)) \
    .append_handler(Handler('end', save_or_remove))
|
#!/usr/bin/env python
from collections import Counter
from itertools import repeat, chain
from functools import partial
import random
from optparse import OptionParser
import sys
import numpy as np
from scipy import stats
from scipy.stats import wilcoxon
from db import PerfDB
import util
# verbosity
verbose = False
# accumulated total time (shared across strategy_wilcoxon calls)
g_ttime = 0
ttime_max = 7200000 # 2 hours
# p-value threshold for the Wilcoxon significance tests
g_pVal = 0.2
# default-ish degrees
degrees = [16, 32, 64, 128, 512, 1024, 2048, 4096]
# absent degrees (degrees requested but missing from the data set)
abs_degrees = []
# per-degree sampled trial statistics, rebuilt for each simulation
model = {}
# per-degree count of samples requested so far
sMap = {}
def sampling(data, b, d, n=1):
    """Sample up to *n* recorded trials for benchmark *b* at degree *d*.

    Returns a dict with keys "p", "ttime" and "search space"; returns an
    empty dict (and records the degree in abs_degrees) when the degree was
    never measured.  Python 2 code: filter/map return lists, / is integer
    division.
    """
    res = {}
    if d not in data[b]:
        abs_degrees.append(d)
        return res
    # indices of trials that did NOT find a solution (ttime[i][0] is falsy)
    pop = filter(lambda i: not data[b][d]["ttime"][i][0], range(len(data[b][d]["search space"])))
    try:
        pop_len = len(data[b][d]["search space"])
        #if verbose: print "Sampling from {} trials for degree {}...".format(pop_len, d)
        samp = random.sample(pop, n)
    except ValueError: # sample larger than population
        # cycle through the whole population, then top up with a partial sample
        samp = list(chain.from_iterable(repeat(range(pop_len), n / pop_len))) + random.sample(range(pop_len), n % pop_len)
    except TypeError: # single value
        samp = repeat(pop_len, n)
    res["p"] = data[b][d]["p"]
    for k in ["ttime", "search space"]:
        res[k] = map(lambda idx: data[b][d][k][idx], samp)
    #if verbose: print "data: {}".format(res)
    return res
def sim_with_degree(sampler, n_cpu, s_name, d, n_runs = 0):
    """Simulate running at fixed degree *d* until a solution is found or the
    total-time budget (ttime_max) is exhausted.

    Returns (found_degree_or_-1, total_time).
    """
    ttime = 0
    found_d = -1
    while found_d < 0 and ttime <= ttime_max:
        runs = sampler(d, n_cpu)
        _n_runs = 0
        _ttime = 0
        for s, t in runs["ttime"]:
            _n_runs = _n_runs + 1
            _ttime = _ttime + t
            if s:
                found_d = d
                if verbose: print "{} found a solution within {} trials".format(s_name, n_runs+_n_runs)
                # the batch's charged time collapses to the successful trial's
                # time (presumably modelling parallel trials — TODO confirm)
                _ttime = t
                break
        n_runs = n_runs + _n_runs
        ttime = ttime + _ttime
    return found_d, ttime
def run_async_trials(sampler, d, n, s_name):
    """Run *n* asynchronous trials at degree *d* and fold them into `model`.

    Returns (charged_time, found_degree_or_-1, len(t_runs)).
    NOTE(review): len(t_runs) counts the result dict's KEYS, not the number
    of trials — verify whether len(t_runs["ttime"]) was intended.
    """
    _soltime = 0
    _ttime = 0
    found_d = -1
    ttime = 0
    t_runs = sampler(d, n)
    if not t_runs:
        return ttime, found_d, len(t_runs)
    if d not in model:
        model[d] = {}
        model[d]["runs"] = []
        model[d]["ttime"] = 0
        model[d]["search space"] = []
    for s, t in t_runs["ttime"]:
        model[d]["runs"].append(t)
        if found_d > 0:
            # a solution was already seen: keep the earliest solution time
            if s and t < _soltime: _soltime = t
        elif s:
            found_d = d
            _soltime = t
        _ttime = _ttime + t
    if found_d > 0:
        if verbose: print "One of {} async trials found a solution.".format(s_name, len(t_runs["ttime"]))
        ttime = _soltime
    else:
        # no solution: charge the average trial time
        ttime = (_ttime / len(t_runs["ttime"]))
    model[d]["p"] = t_runs["p"]
    model[d]["search space"] = model[d]["search space"] + t_runs["search space"]
    return ttime, found_d, len(t_runs)
def test_runs(sampler, n_cpu, s_name, ds):
    """Run up to n_cpu/2 exploratory trials for each degree in *ds*,
    accumulating trial data into `model`.

    Stops early when a solution is found.  Returns
    (total_time, found_degree_or_-1, number_of_runs).
    """
    ttime = 0
    found_d = -1
    n_runs = 0
    for d in ds:
        if d not in model:
            model[d] = {}
            model[d]["runs"] = []
            model[d]["ttime"] = 0
            model[d]["search space"] = []
        l = len(model[d]["runs"])
        # enough samples for this degree already exist
        if l >= n_cpu/2: break
        #if verbose: print "{} trials exist! Run {} trials more!".format(l, n_cpu/2 - l)
        t_runs = sampler(d, n_cpu/2 - l)
        _n_runs = 0
        _ttime = 0
        if not t_runs: break
        for s, t in t_runs["ttime"]:
            model[d]["runs"].append(t)
            _n_runs = _n_runs + 1
            _ttime = _ttime + t
            if s:
                found_d = d
                print "{} found a solution while {} test runs".format(s_name, n_runs+_n_runs)
                # charge only the successful trial's time for this batch
                _ttime = t
                break
        n_runs = n_runs + _n_runs
        model[d]["ttime"] = model[d]["ttime"] + _ttime
        model[d]["p"] = t_runs["p"]
        model[d]["search space"] = model[d]["search space"] + t_runs["search space"]
        ttime = ttime + _ttime
        if found_d > 0: break
    return ttime, found_d, n_runs
def strategy_fixed(d, sampler, n_cpu):
    """Always simulate with the fixed degree *d*; returns (d, total_time).

    Bug fix: the label string was "strategy_fixed_()" with no format
    placeholder, so the degree never appeared in log messages.
    """
    _, ttime = sim_with_degree(sampler, n_cpu, "strategy_fixed_({})".format(d), d)
    return d, ttime
def strategy_random(sampler, n_cpu):
    """Pick a degree uniformly at random and simulate with it.

    Returns (degree, total_time).
    """
    # pick a degree randomly
    d = random.choice(degrees)
    print "strategy_random, pick degree: {}".format(d)
    _, ttime = sim_with_degree(sampler, n_cpu, "strategy_random", d)
    return d, ttime
def strategy_time(f, msg, sampler, n_cpu):
ttime, found_d, n_runs = test_runs(sampler, n_cpu, msg, degrees)
# resampling with likelihood degree
if found_d < 0:
est = []
ds = []
for d in model:
est.append(model[d]["ttime"])
ds.append(d)
idx = est.index(f(est))
d = ds[idx]
print "{}, pick degree: {}".format(msg, d)
_, _ttime = sim_with_degree(sampler, n_cpu, msg, d, n_runs)
ttime = ttime + _ttime
return ttime
def strategy_wilcoxon(sampler, n_cpu, sampleBnd=0):
    """Pick a degree by hill-climbing over adjacent degrees plus a binary
    search, using Wilcoxon signed-rank tests to decide whether two degrees
    differ significantly (threshold g_pVal).

    Returns (chosen_degree_or_[low, high]_range, accumulated g_ttime).
    """
    global g_ttime
    # default sampling bound: three batches of max(8, n_cpu/2) trials
    if sampleBnd == 0: sampleBnd = max(8, n_cpu/2) * 3
    def comp_dist(d):
        # Comparison distribution for degree d: per-trial time weighted by
        # search-space size (a recorded size of 0 is replaced with 10000).
        res = []
        if d not in model:
            if verbose: print "degree {} does not exist in {}".format(d, model.keys())
            return res
        for i in xrange(len(model[d]["runs"])):
            t = model[d]["runs"][i]
            p = model[d]["search space"][i]
            if p == 0:
                p = 10000
            res.append(t * p)
        return res
    def sampleRequested(degree):
        # Number of samples requested so far for this degree (0 if new).
        if degree in sMap:
            return sMap[degree];
        else:
            sMap[degree] = 0
            return sMap[degree]
    def sample(sampler, degree, s_name):
        # Request n_cpu/2 more async trials for this degree and record it.
        prev = sampleRequested(degree)
        sMap[degree] = prev + n_cpu/2
        _ttime, _found_d, _n_runs = run_async_trials(sampler, degree, n_cpu/2, s_name)
        return _ttime, _found_d, _n_runs
    def compare_async(d1, d2):
        # Compare two degrees, topping up samples asynchronously until the
        # Wilcoxon test is significant or the sampling bound is reached.
        if verbose: print "Comparing {} and {}:".format(d1, d2)
        len_a = 0
        len_b = 0
        req_a = 0
        req_b = 0
        _found_d = -1
        _n_runs = 0
        global g_ttime
        while len_a < sampleBnd or len_b < sampleBnd:
            dist_a = comp_dist(d1)
            dist_b = comp_dist(d2)
            len_a = len(dist_a)
            len_b = len(dist_b)
            if _found_d > 0:
                _pvalue = 0
            elif not (dist_a and dist_b):
                _pvalue = 0
            else:
                if len(dist_a) != len(dist_b):
                    if verbose: print "length mismatch: {} vs. {}".format(len(dist_a), len(dist_b))
                    # Wilcoxon is a paired test: truncate to equal lengths.
                    shorter = min(len(dist_a), len(dist_b))
                    dist_a = dist_a[:shorter]
                    dist_b = dist_b[:shorter]
                _rank_sum, _pvalue = wilcoxon(dist_a, dist_b)
                if verbose: print "p-value: {}".format(_pvalue)
                if _pvalue < g_pVal: break
                elif len(dist_a) >= sampleBnd and len(dist_b) >= sampleBnd: break
            req_a = sampleRequested(d1)
            req_b = sampleRequested(d2)
            if req_a >= sampleBnd and req_b >= sampleBnd: break
            # top up whichever side has requested fewer samples so far
            if req_a <= req_b and req_a < sampleBnd:
                _ttime, _found_d, _n_runs = sample(sampler, d1, "strategy_wilcoxon")
                g_ttime = g_ttime + _ttime
                len_a = len_a + _n_runs
                req_a = sampleBnd if _n_runs == 0 else req_a + _n_runs
            if req_b <= req_a and req_b < sampleBnd:
                _ttime, _found_d, _n_runs = sample(sampler, d2, "strategy_wilcoxon")
                g_ttime = g_ttime + _ttime
                len_b = len_b + _n_runs
                req_b = sampleBnd if _n_runs == 0 else req_b + _n_runs
        return dist_a, dist_b, _found_d, _n_runs, _pvalue
    def compare_single(d1, d2):
        # One-shot comparison: run one batch of test runs, then test once.
        if verbose: print "Comparing degrees {} and {}:".format(d1, d2)
        _ttime, _found_d, _n_runs = test_runs(sampler, n_cpu, "strategy_wilcoxon", [d1, d2])
        g_ttime = g_ttime + _ttime
        dist_d1 = comp_dist(d1)
        dist_d2 = comp_dist(d2)
        if _found_d > 0:
            _pvalue = 0
        elif not (dist_d1 and dist_d2):
            _pvalue = 0
        elif len(dist_d1) != len(dist_d2):
            if verbose: print "length mismatch: {} vs. {}".format(len(dist_d1), len(dist_d2))
            _pvalue = 0
        else: _rank_sum, _pvalue = wilcoxon(dist_d1, dist_d2)
        if verbose: print "p-value: {}".format(_pvalue)
        return dist_d1, dist_d2, _found_d, _n_runs, _pvalue
    def binary_search(degree_l, degree_h, cmpr):
        # Narrow [degree_l, degree_h] to a single degree; returns the range
        # as a list when the comparison is degenerate (p-value == 0).
        if degree_l == degree_h: return degree_l
        dist_l, dist_h, found_d, n_runs, pvalue = cmpr(degree_l, degree_h)
        if pvalue == 0:
            return [degree_l, degree_h]
        elif degree_h - degree_l <= degrees[0]:
            mean_l = np.mean(dist_l)
            mean_h = np.mean(dist_h)
            return degree_l if mean_l <= mean_h else degree_h
        else:
            degree_m = (degree_l + degree_h) / 2
            dist_dl, dist_dm, found, n_runs, pvalue = cmpr(degree_l, degree_m)
            if pvalue <= g_pVal: # the median diff. is significatly different
                if np.mean(dist_dl) < np.mean(dist_dm):
                    return binary_search(degree_l, degree_m, cmpr)
                else:
                    return binary_search(degree_m, degree_h, cmpr)
            else: return degree_m
    # Hill-climb over adjacent degree pairs until the left one wins.
    pivots = [0, 1]
    d = None
    found_d = -1
    fixed = False
    n_runs = 0
    cmpr = compare_async
    while found_d < 0 and not fixed and pivots[1] < len(degrees):
        fixed = True
        ds = [ degrees[pivot] for pivot in pivots ]
        d1, d2 = ds
        dist_d1, dist_d2, found_d, n_runs, pvalue = cmpr(d1, d2)
        if found_d > 0:
            print "strategy_wilcoxon, solution found at degree: {}".format(found_d)
            return found_d, g_ttime
        elif not dist_d1:
            # no data for the left degree: shift the whole window right
            pivots[0] = pivots[0] + 1
            pivots[1] = pivots[1] + 1
            fixed = False
        elif not dist_d2:
            pivots[1] = pivots[1] + 1
            fixed = False
        elif pvalue <= g_pVal: # the median diff. is significatly different
            if np.mean(dist_d1) < np.mean(dist_d2):
                # left one is better, climbing done
                break
            else:
                pivots[0] = pivots[0] + 1
                pivots[1] = pivots[1] + 1
                fixed = False
        else: # i.e., can't differentiate two degrees
            pivots[1] = pivots[1] + 1
            fixed = False
    if pivots[1] == len(degrees): pivots[1] = pivots[1] - 1
    # binary search now
    dl = degrees[pivots[0]]
    dh = degrees[pivots[1]]
    d = binary_search(dl, dh, cmpr)
    print "strategy_wilcoxon, pick degree: {}".format(d)
    #if found_d < 0 and type(d) is int and g_ttime <= ttime_max:
    #  found, _ttime = sim_with_degree(sampler, n_cpu, "strategy_wilcoxon", d, n_runs)
    #  g_ttime = g_ttime + _ttime
    return d, g_ttime
def simulate(data, n_cpu, strategy, b):
    """Run *strategy* 301 times over benchmark *b* and report statistics.

    Returns the list of per-simulation total times; prints how often each
    degree (or range of degrees) was chosen plus time estimates.
    """
    global degrees
    global model
    global sMap
    #degrees = sorted(data[b].keys())
    sampler = partial(sampling, data, b)
    res = []
    finished = []
    dgrs = {}
    ranges = []
    for i in xrange(301):
        # fresh per-simulation state
        model = {}
        sMap = {}
        _d, _ttime = strategy(sampler, n_cpu)
        res.append(_ttime)
        if type(_d) is int: # i.e., fixed single degree
            finished.append(_ttime)
            if _d in dgrs: dgrs[_d] = dgrs[_d] + 1
            else: dgrs[_d] = 1
        elif type(_d) is list: # i.e., a range of degrees
            ranges.append(_d)
            # snap the range to multiples of the smallest degree and spread
            # the credit uniformly over the degrees inside it
            low_choice = ((_d[0]-1) / degrees[0] + 1) * degrees[0]
            high_choice = _d[1] / degrees[0] * degrees[0]
            choices = range(low_choice, high_choice+1, degrees[0])
            for i in choices:
                if i in dgrs: dgrs[i] = dgrs[i] + (1 / (len(choices) * 1.0))
                else: dgrs[i] = 1 / (len(choices) * 1.0)
    print "{} simulations ({}%) found fixed degrees!".format(301 - len(ranges), (301-len(ranges))/3.01)
    if len(finished) >= 151:
        print "Median time to find a degree: {}".format(sorted(finished)[150])
    #for [low, high] in ranges:
    #  for dgr in dgrs:
    #    if low <= dgr and dgr <= high: dgrs[dgr] = dgrs[dgr] + 1
    pop = []
    for d in sorted(dgrs.keys()):
        est = "N/A"
        if d in data[b]:
            # estimate: mean over unsuccessful trials of time x ceil(space/n_cpu)
            idxs = filter(lambda i: not data[b][d]["ttime"][i][0], range(len(data[b][d]["search space"])))
            est = np.mean(map(lambda i: data[b][d]["ttime"][i][1] * ((data[b][d]["search space"][i] - 1) / n_cpu + 1), idxs))
        print "degree {}: {} times (estimated time: {})".format(d, dgrs[d], est)
    return res
def main():
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option("--user",
action="store", dest="user", default="sketchperf",
help="user name for database")
parser.add_option("--db",
action="store", dest="db", default="concretization",
help="database name")
parser.add_option("-e", "--eid",
action="append", dest="eids", type="int", default=[],
help="experiment id")
parser.add_option("-d", "--dir",
action="store", dest="data_dir", default="data",
help="output folder")
parser.add_option("-b", "--benchmark",
action="append", dest="benchmarks", default=[],
help="benchmark(s) of interest")
parser.add_option("--all",
action="store_true", dest="all_strategies", default=False,
help="simulate *all* modeled strategies")
parser.add_option("-p", "--p-value",
action="store", dest="p_value", type="float", default=0.2,
help="p-value for Wilcoxon test")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="verbosely print out simulation data")
(opt, args) = parser.parse_args()
global verbose, g_pVal
verbose = opt.verbose
g_pVal = opt.p_value
db = PerfDB(opt.user, opt.db)
db.drawing = True
db.detail_space = True
if not opt.eids: opt.edis = [11]
db.calc_stat(opt.benchmarks, True, opt.eids)
data = db.raw_data
merged = util.merge_succ_fail(data, 1000)
n_cpu = 32
_simulate = partial(simulate, merged, n_cpu)
simulators = {}
simulators["wilcoxon"] = partial(_simulate, strategy_wilcoxon)
if opt.all_strategies:
simulators["random"] = partial(_simulate, strategy_random)
strategy_min_time = partial(strategy_time, min, "strategy_min_time")
strategy_max_time = partial(strategy_time, max, "strategy_max_time")
simulators["min(time)"] = partial(_simulate, strategy_min_time)
simulators["max(time)"] = partial(_simulate, strategy_max_time)
for b in merged:
print "\n=== benchmark: {} ===".format(b)
_simulators = simulators.copy()
if opt.all_strategies:
degrees = sorted(merged[b].keys())
for d in degrees:
strategy_fixed_d = partial(strategy_fixed, d)
_simulators["fixed({})".format(d)] = partial(_simulate, strategy_fixed_d)
for s in sorted(_simulators.keys()):
print "Simulating strategy {}...".format(s)
res = _simulators[s](b)
print "{} simulations done.".format(len(res))
s_q = " | ".join(map(str, util.calc_percentile(res)))
print "{} : {} ({})\n\t[ {} ]".format(s, np.mean(res), np.var(res), s_q)
print "absent degrees: {}".format(Counter(abs_degrees))
if __name__ == "__main__":
sys.exit(main())
|
from typing import (
Callable,
)
from htdfsdk.web3._utils.rpc_abi import (
RPC,
)
from htdfsdk.web3.method import (
Method,
)
from htdfsdk.web3.types import (
TxPoolContent,
TxPoolInspect,
TxPoolStatus,
)
# Module-level Method bindings for the node's `txpool` JSON-RPC namespace.
# mungers=None: presumably no argument preprocessing is applied — verify
# against the Method implementation.
content: Method[Callable[[], TxPoolContent]] = Method(
    RPC.txpool_content,
    mungers=None,
)
inspect: Method[Callable[[], TxPoolInspect]] = Method(
    RPC.txpool_inspect,
    mungers=None,
)
status: Method[Callable[[], TxPoolStatus]] = Method(
    RPC.txpool_status,
    mungers=None,
)
|
import sys
import time
import datetime
import logging
from docopt import docopt
import numpy as np
from pysilcam import __version__
from pysilcam.acquisition import Acquire
from pysilcam.background import backgrounder
from pysilcam.process import processImage, statextract
import pysilcam.oilgas as scog
from pysilcam.config import PySilcamSettings, updatePathLength
import os
import pysilcam.silcam_classify as sccl
import multiprocessing
from multiprocessing.managers import BaseManager
from queue import LifoQueue
import psutil
from shutil import copyfile
import warnings
import pandas as pd
import time
from pysilcam.__main__ import *
import pygame
from skimage.io import imsave as imwrite
logger = logging.getLogger(__name__)
def get_image(datapath, config_filename):
    """Yield (timestamp, image) pairs from the live SilCam acquisition.

    Delegates to liveview_acquire with raw-data writing disabled.

    Bug fix: the original called next() inside an infinite loop in a
    generator; on generator exhaustion the resulting StopIteration becomes
    a RuntimeError under PEP 479 (Python 3.7+).  `yield from` delegates the
    same values without that pitfall.
    """
    yield from liveview_acquire(datapath, config_filename, writeToDisk=False)
def convert_image(im, size):
    """Convert a numpy image array into a pygame surface scaled to *size*."""
    im = pygame.surfarray.make_surface(np.uint8(im))
    # NOTE(review): the flip + rotate pair presumably corrects surfarray's
    # (x, y) axis order so the image displays upright — confirm against the
    # camera output.
    im = pygame.transform.flip(im, False, True)
    im = pygame.transform.rotate(im, -90)
    im = pygame.transform.scale(im, size)
    return im
def liveview_acquire(datapath, config_filename, writeToDisk=False):
    '''Aquire images from the SilCam

    Args:
      datapath              (str)          :  Path to the image storage
      config_filename=None  (str)          :  Camera config file
      writeToDisk=True      (Bool)         :  True will enable writing of raw data to disc
                                              False will disable writing of raw data to disc

    Yields:
      (timestamp, imraw) tuples as images arrive from the camera.
    '''
    #Load the configuration, create settings object
    settings = PySilcamSettings(config_filename)
    #Print configuration to screen
    print('---- CONFIGURATION ----\n')
    settings.config.write(sys.stdout)
    print('-----------------------\n')
    if (writeToDisk):
        # Copy config file alongside the raw data, stamped with the start time
        configFile2Copy = datetime.datetime.now().strftime('D%Y%m%dT%H%M%S.%f') + os.path.basename(config_filename)
        copyfile(config_filename, os.path.join(datapath, configFile2Copy))
    configure_logger(settings.General)
    # update path_length
    updatePathLength(settings, logger)
    acq = Acquire(USE_PYMBA=True) # ini class
    t1 = time.time()
    aqgen = acq.get_generator(datapath, camera_config_file=config_filename, writeToDisk=writeToDisk)
    for i, (timestamp, imraw) in enumerate(aqgen):
        t2 = time.time()
        # measured acquisition frequency since the previous frame
        aq_freq = np.round(1.0/(t2 - t1), 1)
        requested_freq = settings.Camera.acquisitionframerateabs
        # time we would need to rest to hit the requested frame rate
        rest_time = (1 / requested_freq) - (1 / aq_freq)
        rest_time = np.max([rest_time, 0.])
        actual_aq_freq = 1/(1/aq_freq + rest_time)
        logger.info('Image {0} acquired at frequency {1:.1f} Hz'.format(i, actual_aq_freq))
        t1 = time.time()
        yield timestamp, imraw
def write_image(datapath, timestamp, imraw):
    """Save *imraw* as an 8-bit BMP named after *timestamp* under *datapath*."""
    filename = timestamp.strftime('D%Y%m%dT%H%M%S.%f.bmp')
    imwrite(os.path.join(datapath, filename), np.uint8(imraw))
    logger.info('Image written')
def get_image_size(im):
    """Return the shape tuple of image *im*."""
    return np.shape(im)
def zoomer(zoom):
    """Advance the zoom level, cycling 0 -> 1 -> 2 -> 0."""
    advanced = zoom + 1
    return 0 if advanced > 2 else advanced
def liveview(datapath = '/mnt/DATA/emlynd/DATA/', config_filename = 'config_hardware_test.ini'):
    """Interactive pygame viewer for the live SilCam feed.

    Keys: F cycles zoom, P toggles pause, SPACE writes the current raw
    image, ESC (or closing the window) exits.
    """
    try:
        import pymba
    except:
        logger.info('Pymba not available. Cannot use camera')
        return
    aqgen = get_image(datapath, config_filename)
    timestamp, imraw = next(aqgen)
    ims = get_image_size(imraw)
    pygame.init()
    info = pygame.display.Info()
    # window sized to the display height, preserving the image aspect ratio
    size = (int(info.current_h / (ims[0]/ims[1]))-50, info.current_h-50)
    screen = pygame.display.set_mode(size)
    font = pygame.font.SysFont("monospace", 20)
    zoom = 0
    pause = False
    pygame.event.set_blocked(pygame.MOUSEMOTION)
    # NOTE: shadows the `exit` builtin; it is the loop-termination flag here
    exit = False
    c = pygame.time.Clock()
    while not exit:
        if pause:
            # While paused, block on events instead of pulling new frames.
            event = pygame.event.wait()
            # event type 12 is the window-close (QUIT) event
            if event.type == 12:
                exit = True
                continue
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_f:
                    zoom = zoomer(zoom)
                if event.key == pygame.K_p:
                    pause = np.invert(pause)
                if event.key == pygame.K_ESCAPE:
                    exit = True
                    continue
            else:
                continue
            pygame.time.wait(100)
        timestamp, imraw = next(aqgen)
        if zoom>0:
            label = font.render('ZOOM [F]: ' + str(zoom), 1, (255, 255, 0))
            # crop to the centre: quarter margins at zoom 1, deeper at zoom 2
            if zoom==1:
                imcrop = imraw[int(ims[0]/4):-int(ims[0]/4),
                               int(ims[1]/4):-int(ims[1]/4),:]
            else:
                imcrop = imraw[int(ims[0]/2.5):-int(ims[0]/2.5),
                               int(ims[1]/2.5):-int(ims[1]/2.5),:]
            im = convert_image(imcrop, size)
        else:
            im = convert_image(imraw, size)
            label = font.render('ZOOM [F]: OFF', 1, (255, 255, 0))
        screen.blit(im, (0, 0))
        screen.blit(label,(0, size[1]-20))
        label = font.render('pause[p] write[scpace] exit[Esc]', 1, (255,255,0))
        screen.blit(label, (0, size[1]-40))
        pygame.display.set_caption('Image display')
        label = font.render(str(timestamp) + ' Disp. FPS: ' +
                            str(c.get_fps()), 20, (255, 255, 0))
        screen.blit(label,(0,0))
        label = font.render('Esc to exit',
                            1, (255, 255, 0))
        screen.blit(label,(0,20))
        for event in pygame.event.get():
            if event.type == 12:
                pygame.quit()
                return
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_f:
                    zoom = zoomer(zoom)
                if event.key == pygame.K_SPACE:
                    write_image(datapath, timestamp, imraw)
                if event.key == pygame.K_p:
                    pause = np.invert(pause)
                if event.key == pygame.K_ESCAPE:
                    exit = True
                    continue
        pygame.display.flip()
    pygame.quit()
|
import unittest
from django.test import Client
from django.test import TestCase
from django.urls import reverse
from accounts.models import AccountDetails
from categories.models import Category
from django.contrib.auth.models import User
from events.models import Event
from tasks.models import Task
class EventsTestCase(TestCase):
    """Tests for the task delete/edit views attached to an event.

    setUp creates one member user ('john'), five events owned by that user,
    and a single task (slug "slug") on the last event created.  The
    '*_error' tests log in as a second, non-member user.
    """
    def setUp(self):
        self.total_number_of_events = 5
        self.client = Client()
        self.user = User.objects.create_user(
            'john',
            'lennon@thebeatles.com',
            'johnpassword'
        )
        self.user.details = AccountDetails.objects.create(
            user=self.user,
            description='cool description',
            slug="userslug"
        )
        self.client.login(username='john', password='johnpassword')
        category = Category.objects.create(
            name='test event category',
            description='cool description',
            slug="test",
        )
        for event_id in range(self.total_number_of_events):
            eventstring = "test" + str(event_id)
            self.event = Event.objects.create(
                title=eventstring,
                description=eventstring
            )
            self.event.save()
            self.event.category.add(category)
            self.event.team_members.add(self.user)
            self.event.save()
        # create task
        self.task = Task.objects.create(
            title="Opaaaa", event=self.event, slug="slug")
        self.task.save()
    def test_delete_task_url_error(self):
        """A non-member user requesting delete_task gets a 200 page back."""
        user = User.objects.create_user(
            'test',
            'lennon@thebeatles.com',
            'johnpassword'
        )
        self.client.login(username='test', password='johnpassword')
        url = reverse(
            "tasks.delete_task",
            kwargs={
                'slug': self.event.slug,
                "task": "slug"})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
    def test_delete_task_url_returns_error(self):
        """A non-member user's delete_task response contains "Error"."""
        # NOTE(review): duplicates the setup of test_delete_task_url_error,
        # asserting on the body instead of the status code.
        user = User.objects.create_user(
            'test',
            'lennon@thebeatles.com',
            'johnpassword'
        )
        self.client.login(username='test', password='johnpassword')
        url = reverse(
            "tasks.delete_task",
            kwargs={
                'slug': self.event.slug,
                "task": "slug"})
        response = self.client.get(url)
        self.assertContains(response, "Error")
    def test_delete_task_url_success(self):
        """The event owner deleting the task is redirected (302)."""
        self.client.login(username='john', password='johnpassword')
        url = reverse(
            "tasks.delete_task",
            kwargs={
                'slug': self.event.slug,
                "task": "slug"})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)
    def test_edit_task_url_error(self):
        """A non-member user requesting edit_task gets a 200 page back."""
        user = User.objects.create_user(
            'test',
            'lennon@thebeatles.com',
            'johnpassword'
        )
        self.client.login(username='test', password='johnpassword')
        url = reverse(
            "tasks.edit_task",
            kwargs={
                'slug': self.event.slug,
                "task": "slug"})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
    def test_edit_task_url_returns_error(self):
        """A non-member user's edit_task response contains "Error"."""
        user = User.objects.create_user(
            'test',
            'lennon@thebeatles.com',
            'johnpassword'
        )
        self.client.login(username='test', password='johnpassword')
        url = reverse(
            "tasks.edit_task",
            kwargs={
                'slug': self.event.slug,
                "task": "slug"})
        response = self.client.get(url)
        self.assertContains(response, "Error")
    def test_edit_task_url_success(self):
        """The event owner can open the edit page (200)."""
        self.client.login(username='john', password='johnpassword')
        url = reverse(
            "tasks.edit_task",
            kwargs={
                'slug': self.event.slug,
                "task": "slug"})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
    def test_edit_task_url_success_message(self):
        """The owner's edit page shows the edit heading and the task title."""
        self.client.login(username='john', password='johnpassword')
        url = reverse(
            "tasks.edit_task",
            kwargs={
                'slug': self.event.slug,
                "task": "slug"})
        response = self.client.get(url)
        self.assertContains(response, "Edit task:")
        self.assertContains(response, self.task.title)
|
import functools
import cachetools
import numpy as np
import pandas as pd
def async_ttl_cache(ttl: int = 3600, maxsize: int = 1):
    """Decorator factory: memoize an async function in a TTL-bounded cache.

    The cache key is the string form of (args, kwargs), so arguments only
    need a stable repr, not hashability.  The wrapped function gains a
    ``cache_clear()`` helper that empties the cache.
    """
    cache = cachetools.TTLCache(ttl=ttl, maxsize=maxsize)

    def decorator(fn):
        @functools.wraps(fn)
        async def wrapper(*args, **kwargs):
            cache_key = str((args, kwargs))
            try:
                return cache[cache_key]
            except KeyError:
                cache[cache_key] = await fn(*args, **kwargs)
            return cache[cache_key]

        wrapper.cache_clear = cache.clear
        return wrapper

    return decorator
def map_df_to_str(df: pd.DataFrame) -> pd.DataFrame:
    """Return *df* with every cell stringified.

    Floats are rendered positionally with trailing zeros (and any trailing
    decimal point) trimmed, so e.g. 2.0 becomes "2"; other values go
    through plain str().
    """
    def _stringify_float(value):
        if isinstance(value, float):
            return np.format_float_positional(value, trim="-")
        return value

    return df.applymap(_stringify_float).astype(str)
|
# -*- coding: utf-8 -*-
"""pybooru.api_danbooru
This module contains all API calls of Gelbooru.
Classes:
GelbooruApi_Mixin -- Contains all API endspoints.
"""
# __future__ imports
from __future__ import absolute_import
class GelbooruApi_Mixin(object):
    """Contains all Gelbooru API calls.

    * API Version commit: ?
    * Doc: https://gelbooru.me/index.php?page=wiki&s=view&id=18780
    """
    def post_list(self, **params):
        """Get a list of posts.

        Parameters:
            page (int): The page number starting at 1
            tags (str): The tags to search for. Any tag combination that works
                        on the web site will work here. This includes all the
                        meta-tags.
        """
        # The service paginates with a zero-based 'pid' parameter; translate
        # the one-based 'page' only when the caller supplied it.  (Bug fix:
        # previously a missing 'page' raised KeyError.)
        if 'page' in params:
            params['pid'] = params.pop('page') - 1
        if self.site_name == 'rule34':
            # rule34 is served through the XML variant of the endpoint.
            return self._get_xml('post', params)
        return self._get('post', params)
    def tag_list(self, name_pattern=None, name=None, order=None, orderby=None):
        """Get a list of tags.

        Parameters:
            name_pattern (str): Can be: part or full name.
            name (str): Allows searching for tag with given name
            order (str): Can be: ASC, DESC.
            orderby (str): Can be: name, date, count.
        """
        params = {
            'name_pattern': name_pattern,
            'name': name,
            'order': order,
            'orderby': orderby
        }
        if self.site_name == 'rule34':
            return self._get_xml('tag', params)
        return self._get('tag', params)
    def artist_list(self, query=None, artist_id=None, creator_name=None,
                    creator_id=None, any_name_matches=None, is_active=None,
                    empty_only=None, order=None, is_banned=None,
                    extra_params=None):
        """Get an artist of a list of artists.

        Parameters:
            query (str):
                This field has multiple uses depending on what the query starts
                with:
                'http:desired_url':
                    Search for artist with this URL.
                'name:desired_url':
                    Search for artists with the given name as their base name.
                'other:other_name':
                    Search for artists with the given name in their other
                    names.
                'group:group_name':
                    Search for artists belonging to the group with the given
                    name.
                'status:banned':
                    Search for artists that are banned. else Search for the
                    given name in the base name and the other names.
            artist_id (id): The artist id.
            creator_name (str): Exact creator name.
            creator_id (id): Artist creator id.
            is_active (bool): Can be: true, false
            is_banned (bool): Can be: true, false
            empty_only (True): Search for artists that have 0 posts. Can be:
                               true
            order (str): Can be: name, updated_at.
            extra_params (dict): Extra query parameters merged into the
                                 request (default: none).
        """
        params = {
            'search[name]': query,
            'search[id]': artist_id,
            'search[creator_name]': creator_name,
            'search[any_name_matches]': any_name_matches,
            'search[creator_id]': creator_id,
            'search[is_active]': is_active,
            'search[is_banned]': is_banned,
            'search[empty_only]': empty_only,
            'search[order]': order
        }
        # Bug fix: `extra_params={}` was a shared mutable default argument;
        # None is now the sentinel and an absent value is simply skipped
        # (updating with an empty dict was a no-op anyway).
        if extra_params:
            params.update(extra_params)
        return self._get('artists.json', params)
    def wiki_list(self, title=None, creator_id=None, body_matches=None,
                  other_names_match=None, creator_name=None, hide_deleted=None,
                  other_names_present=None, order=None):
        """Function to retrieves a list of every wiki page.

        Parameters:
            title (str): Page title.
            creator_id (int): Creator id.
            body_matches (str): Page content.
            other_names_match (str): Other names.
            creator_name (str): Creator name.
            hide_deleted (str): Can be: yes, no.
            other_names_present (str): Can be: yes, no.
            order (str): Can be: date, title.
        """
        params = {
            'search[title]': title,
            'search[creator_id]': creator_id,
            'search[body_matches]': body_matches,
            'search[other_names_match]': other_names_match,
            'search[creator_name]': creator_name,
            'search[hide_deleted]': hide_deleted,
            'search[other_names_present]': other_names_present,
            'search[order]': order
        }
        return self._get('wiki_pages.json', params)
|
# Read five numbers from the user, then report their sum and average.
numeros = [float(input("digite o {}º numero ".format(i))) for i in range(1, 6)]
soma = sum(numeros)
media = soma / 5
print("A soma foi {}".format(soma))
print("A média foi {}".format(media))
|
import pp
from pp import components as pc
@pp.autoname
def test_comb(
    pad_size=(200, 200),
    wire_width=1,
    wire_gap=3,
    comb_layer=0,
    overlap_zigzag_layer=1,
    comb_pad_layer=None,
    comb_gnd_layer=None,
    overlap_pad_layer=None,
):
    """ Superconducting heater device from phidl.geometry

    Builds four pads (each with a 90%-size overlay pad), a connected
    zig-zag wire with comb fingers running between the left and right
    pads, and a disconnected zig-zag on a second layer.

    Args:
        pad_size=(200, 200): (width, height) of each pad
        wire_width=1: width of zig-zag / comb wires
        wire_gap=3: gap between wires (sets wire_spacing)
        comb_layer=0: layer for pads, comb and connected zig-zag
        overlap_zigzag_layer=1: layer for the disconnected zig-zag
        comb_pad_layer=None: defaults to comb_layer
        comb_gnd_layer=None: defaults to comb_layer
        overlap_pad_layer=None: defaults to overlap_zigzag_layer
    """
    CI = pp.Component()
    # default the derived layers to their base layers when not given
    if comb_pad_layer is None:
        comb_pad_layer = comb_layer
    if comb_gnd_layer is None:
        comb_gnd_layer = comb_layer
    if overlap_pad_layer is None:
        overlap_pad_layer = overlap_zigzag_layer
    # center-to-center pitch between parallel wires
    wire_spacing = wire_width + wire_gap * 2
    # %% pad overlays (90% of the pad footprint)
    overlay_padb = CI.add_ref(
        pc.rectangle(
            size=(pad_size[0] * 9 / 10, pad_size[1] * 9 / 10), layer=overlap_pad_layer
        )
    )
    overlay_padl = CI.add_ref(
        pc.rectangle(
            size=(pad_size[0] * 9 / 10, pad_size[1] * 9 / 10), layer=comb_pad_layer
        )
    )
    overlay_padt = CI.add_ref(
        pc.rectangle(
            size=(pad_size[0] * 9 / 10, pad_size[1] * 9 / 10), layer=comb_pad_layer
        )
    )
    overlay_padr = CI.add_ref(
        pc.rectangle(
            size=(pad_size[0] * 9 / 10, pad_size[1] * 9 / 10), layer=comb_gnd_layer
        )
    )
    # lay the overlays out: left at the origin, bottom/right/top spaced around it
    overlay_padl.xmin = 0
    overlay_padl.ymin = 0
    overlay_padb.ymax = 0
    overlay_padb.xmin = overlay_padl.xmax + pad_size[1] / 5
    overlay_padr.ymin = overlay_padl.ymin
    overlay_padr.xmin = overlay_padb.xmax + pad_size[1] / 5
    overlay_padt.xmin = overlay_padl.xmax + pad_size[1] / 5
    overlay_padt.ymin = overlay_padl.ymax
    # %% pads
    padl = CI.add_ref(pc.rectangle(size=pad_size, layer=comb_layer))
    padt = CI.add_ref(pc.rectangle(size=pad_size, layer=comb_layer))
    padr = CI.add_ref(pc.rectangle(size=pad_size, layer=comb_layer))
    padb = CI.add_ref(pc.rectangle(size=pad_size, layer=overlap_zigzag_layer))
    # nubs stick out of the left/right pads to anchor the zig-zag wire
    padl_nub = CI.add_ref(
        pc.rectangle(size=(pad_size[0] / 4, pad_size[1] / 2), layer=comb_layer)
    )
    padr_nub = CI.add_ref(
        pc.rectangle(size=(pad_size[0] / 4, pad_size[1] / 2), layer=comb_layer)
    )
    # center each full pad on its overlay
    padl.xmin = overlay_padl.xmin
    padl.center = [padl.center[0], overlay_padl.center[1]]
    padt.ymax = overlay_padt.ymax
    padt.center = [overlay_padt.center[0], padt.center[1]]
    padr.xmax = overlay_padr.xmax
    padr.center = [padr.center[0], overlay_padr.center[1]]
    padb.ymin = overlay_padb.ymin
    padb.center = [overlay_padb.center[0], padb.center[1]]
    padl_nub.xmin = padl.xmax
    padl_nub.center = [padl_nub.center[0], padl.center[1]]
    padr_nub.xmax = padr.xmin
    padr_nub.center = [padr_nub.center[0], padr.center[1]]
    # %% connected zig
    head = CI.add_ref(pc.compass(size=(pad_size[0] / 12, wire_width), layer=comb_layer))
    head.xmin = padl_nub.xmax
    head.ymax = padl_nub.ymax
    connector = CI.add_ref(pc.compass(size=(wire_width, wire_width), layer=comb_layer))
    connector.connect(port="W", destination=head.ports["E"])
    old_port = connector.ports["S"]
    top = True
    obj = connector
    # march rightwards — long run, edge square, comb finger, spacer, edge
    # square — until the next segment would pass the right nub
    while obj.xmax + pad_size[0] / 12 < padr_nub.xmin:
        # long zig zag rectangle
        obj = CI.add_ref(
            pc.compass(
                size=(pad_size[1] / 2 - 2 * wire_width, wire_width), layer=comb_layer
            )
        )
        obj.connect(port="W", destination=old_port)
        old_port = obj.ports["E"]
        if top:
            # zig zag edge rectangle
            obj = CI.add_ref(
                pc.compass(size=(wire_width, wire_width), layer=comb_layer)
            )
            obj.connect(port="N", destination=old_port)
            top = False
        else:
            # zig zag edge rectangle
            obj = CI.add_ref(
                pc.compass(size=(wire_width, wire_width), layer=comb_layer)
            )
            obj.connect(port="S", destination=old_port)
            top = True
        # comb rectangle (vertical finger hanging from the top pad)
        comb = CI.add_ref(
            pc.rectangle(
                size=(
                    (padt.ymin - head.ymax)
                    + pad_size[1] / 2
                    - (wire_spacing + wire_width) / 2,
                    wire_width,
                ),
                layer=comb_layer,
            )
        )
        comb.rotate(90)
        comb.ymax = padt.ymin
        comb.xmax = obj.xmax - (wire_spacing + wire_width) / 2
        old_port = obj.ports["E"]
        # spacer keeping the fingers wire_spacing apart
        obj = CI.add_ref(pc.compass(size=(wire_spacing, wire_width), layer=comb_layer))
        obj.connect(port="W", destination=old_port)
        old_port = obj.ports["E"]
        obj = CI.add_ref(pc.compass(size=(wire_width, wire_width), layer=comb_layer))
        obj.connect(port="W", destination=old_port)
        # hand the S/N port to the next iteration's long run
        if top:
            old_port = obj.ports["S"]
        else:
            old_port = obj.ports["N"]
    # after the loop, continue from the east port towards the right nub
    old_port = obj.ports["E"]
    if padr_nub.xmin - obj.xmax > 0:
        tail = CI.add_ref(
            pc.compass(size=(padr_nub.xmin - obj.xmax, wire_width), layer=comb_layer)
        )
    else:
        tail = CI.add_ref(pc.compass(size=(wire_width, wire_width), layer=comb_layer))
    tail.connect(port="W", destination=old_port)
    # %% disconnected zig (separate layer, climbs up from the bottom pad)
    dhead = CI.add_ref(
        pc.compass(
            size=(padr_nub.ymin - padb.ymax - wire_width, wire_width),
            layer=overlap_zigzag_layer,
        )
    )
    dhead.rotate(90)
    dhead.ymin = padb.ymax
    dhead.xmax = tail.xmin - (wire_spacing + wire_width) / 2
    connector = CI.add_ref(
        pc.compass(size=(wire_width, wire_width), layer=overlap_zigzag_layer)
    )
    connector.connect(port="S", destination=dhead.ports["E"])
    old_port = connector.ports["N"]
    right = True
    obj = connector
    # stack short horizontal runs and edge squares until reaching head height
    while obj.ymax + wire_spacing + wire_width < head.ymax:
        obj = CI.add_ref(
            pc.compass(size=(wire_spacing, wire_width), layer=overlap_zigzag_layer)
        )
        obj.connect(port="W", destination=old_port)
        old_port = obj.ports["E"]
        if right:
            obj = CI.add_ref(
                pc.compass(size=(wire_width, wire_width), layer=overlap_zigzag_layer)
            )
            obj.connect(port="W", destination=old_port)
            right = False
        else:
            obj = CI.add_ref(
                pc.compass(size=(wire_width, wire_width), layer=overlap_zigzag_layer)
            )
            obj.connect(port="E", destination=old_port)
            right = True
        old_port = obj.ports["N"]
    # final horizontal run back towards the head's x position
    obj = CI.add_ref(
        pc.compass(
            size=(
                dhead.xmin - (head.xmax + head.xmin + wire_width) / 2,
                wire_width,
            ),
            layer=overlap_zigzag_layer,
        )
    )
    obj.connect(port="E", destination=old_port)
    old_port = obj.ports["W"]
    obj = CI.add_ref(
        pc.compass(size=(wire_width, wire_width), layer=overlap_zigzag_layer)
    )
    obj.connect(port="S", destination=old_port)
    # NOTE(review): old_port is assigned below but never used afterwards —
    # looks like leftover from the phidl original; confirm upstream.
    if right:
        old_port = obj.ports["W"]
    else:
        old_port = obj.ports["E"]
    return CI
if __name__ == "__main__":
    # visual smoke test: build the comb component and open it in the viewer
    c = test_comb()
    pp.show(c)
|
from os import *
|
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import pytest
import threadloop
from .mock_server import MockServer
from .util import get_thrift_service_module
class _MockConnection(object):
def __init__(self):
self.buff = bytearray()
self.remote_host = "0.0.0.0"
self.remote_host_port = "0"
def write(self, payload, callback=None):
self.buff.extend(payload)
def getvalue(self):
return self.buff
@pytest.fixture
def connection():
    """Make a mock connection."""
    # fresh buffer per test, so written payloads don't leak between tests
    return _MockConnection()
@pytest.fixture
def mock_server():
    """Yield a running MockServer for the duration of a test.

    Uses ``pytest.fixture`` with a yield instead of the long-deprecated
    ``pytest.yield_fixture`` (removed in pytest 6.2).
    """
    with MockServer() as server:
        yield server
@pytest.fixture
def thrift_service(tmpdir):
    """Yield the generated thrift service module built in a temp dir.

    Uses ``pytest.fixture`` with a yield instead of the long-deprecated
    ``pytest.yield_fixture`` (removed in pytest 6.2).
    """
    with get_thrift_service_module(tmpdir, True) as m:
        yield m
@pytest.fixture
def loop():
    """Yield a started ThreadLoop; always stop it, even if the test fails.

    Uses ``pytest.fixture`` (``yield_fixture`` was removed in pytest 6.2)
    and a try/finally so the background loop thread is not leaked when the
    test body raises.
    """
    tl = threadloop.ThreadLoop()
    tl.start()
    try:
        yield tl
    finally:
        tl.stop()
|
import random
from BaseAI import BaseAI
from helper_functions import *
class ComputerAI(BaseAI):
    """AI player that moves and places traps uniformly at random."""

    def __init__(self, initial_position=None) -> None:
        super().__init__()
        self.pos = initial_position
        self.player_num = None

    def setPosition(self, new_pos: tuple):
        """Record the player's current cell."""
        self.pos = new_pos

    def getPosition(self):
        """Return the player's current cell."""
        return self.pos

    def getPlayerNum(self):
        """Return this player's number."""
        return self.player_num

    def setPlayerNum(self, num):
        """Assign this player's number."""
        self.player_num = num

    def getMove(self, grid):
        """ Returns a random, valid move """
        # pick uniformly among free neighboring cells, if any
        options = grid.get_neighbors(self.pos, only_available=True)
        if not options:
            return None
        return random.choice(options)

    def getTrap(self, grid: Grid):
        """Get the *intended* trap move of the player"""
        # pick uniformly among all available cells, if any
        open_cells = grid.getAvailableCells()
        if not open_cells:
            return None
        return random.choice(open_cells)
|
import numpy as np
class CONST_SYNAPSE():
    """Synapse modelled by a single, non-changing weight.

    The synaptic current is a weighted sum of difference-of-exponentials
    kernels, one per (delay-shifted) presynaptic spike.
    """

    def __init__(self, w, I0, tau, tau_s, tau_d):
        # w: fixed synaptic weight; I0: current scale
        # tau / tau_s: decay / rise time constants; tau_d: axonal delay
        self.w = w
        self.I0 = I0
        self.tau = tau
        self.tau_s = tau_s
        self.tau_d = tau_d

    def getI(self, V_train, spike_instants, delta_t):
        """Compute the synaptic current trace.

        V_train : 1 X n_t array (only its length is used here).
        spike_instants : list(arr(num_of_spikes)) — only the first train
            is used.
        delta_t : simulation time step.
        Returns It = 1 X n_t (also stored on self.It).
        """
        num_steps = V_train.shape[1]
        self.It = np.zeros(shape=(1, num_steps))
        delay_steps = int(self.tau_d // delta_t)
        delayed = [train + delay_steps for train in spike_instants]
        first_train = delayed[0]
        for step in range(num_steps):
            # indices of spikes that arrived strictly before this step
            past_idx = np.where((np.array(first_train) < step) == 1)[0]
            past_spikes = np.array(first_train[past_idx])
            if past_spikes.size != 0:
                kernel_sum = self.f(step * delta_t, past_spikes * delta_t)
                self.It[0, step] = self.I0 * self.w * kernel_sum
            else:
                self.It[0, step] = 0
        return self.It

    def f(self, t, t_calc):
        """Sum of difference-of-exponentials kernels evaluated at time t."""
        decay = np.exp(-(t - t_calc) / self.tau)
        rise = np.exp(-(t - t_calc) / self.tau_s)
        return np.sum(decay - rise)
class PLASTIC_SYNAPSE_A():
    """Plastic synapse with a multiplicative, bounded weight-update rule.

    Current model is identical to CONST_SYNAPSE; the weight is adjusted by
    weight_update() and clamped to the range [1, 500].
    """

    def __init__(self, w, I0, tau, tau_s, tau_d):
        # w: synaptic weight (mutated by weight_update); I0: current scale
        # tau / tau_s: decay / rise time constants; tau_d: axonal delay
        self.w = w
        self.I0 = I0
        self.tau = tau
        self.tau_s = tau_s
        self.tau_d = tau_d

    def getI(self, V_train, spike_instants, delta_t):
        """Compute the synaptic current trace.

        V_train : 1 X n_t array (only its length is used here).
        spike_instants : list(arr(num_of_spikes)) — only the first train
            is used.
        delta_t : simulation time step.
        Returns It = 1 X n_t (also stored on self.It).
        """
        n_t = V_train.shape[1]
        self.It = np.zeros(shape=(1, n_t))
        # shift spikes by the axonal delay (in steps)
        spike_instants_delayed = [si + int(self.tau_d // delta_t) for si in spike_instants]
        for t in range(n_t):
            # spikes that arrived strictly before this step contribute
            contribution = np.array(spike_instants_delayed[0]) < t
            contribution_i = np.where(contribution == 1)[0]
            t_calc = np.array(spike_instants_delayed[0][contribution_i])
            if t_calc.size != 0:
                s = self.f(t * delta_t, t_calc * delta_t)
                self.It[0, t] = self.I0 * self.w * s
            else:
                self.It[0, t] = 0
        return self.It

    def f(self, t, t_calc):
        """Sum of difference-of-exponentials kernels evaluated at time t."""
        s1 = np.exp(-(t - t_calc) / self.tau)
        s2 = np.exp(-(t - t_calc) / self.tau_s)
        return np.sum(s1 - s2)

    def weight_update(self, gamma, delta_tk, upd_coeff):
        """Update w and return the delta by which it actually changed.

        gamma : learning rate.
        delta_tk : spike-timing difference (s).
        upd_coeff : +1 for increment, -1 for decrement (anything else is
            a no-op, as in the original rule).

        Bug fix: the applied delta is computed BEFORE w is overwritten —
        the original returned 0 when clamping (it computed ``1 - self.w``
        after setting ``self.w = 1``) and otherwise returned a delta based
        on the already-updated weight.
        """
        s1 = np.exp(- delta_tk / self.tau)
        s2 = np.exp(- delta_tk / self.tau_s)
        if upd_coeff == -1:
            if self.w <= 1:
                # clamp at the lower bound; report the real change
                applied = 1 - self.w
                self.w = 1
                return applied
            applied = upd_coeff * self.w * gamma * (s1 - s2)
            self.w = self.w + applied
            return applied
        elif upd_coeff == 1:
            if self.w >= 500:
                # clamp at the upper bound; report the real change
                applied = 500 - self.w
                self.w = 500
                return applied
            applied = upd_coeff * self.w * gamma * (s1 - s2)
            self.w = self.w + applied
            return applied
class PLASTIC_SYNAPSE_B():
    """Plastic synapse whose weight updates depend on the spike-timing delay.

    Current model is identical to CONST_SYNAPSE; weight_update() scales the
    weight by an exponential timing factor.
    """

    def __init__(self, w, I0, tau, tau_s, tau_d, tau_l, A_up, A_dn):
        self.w = w
        self.I0 = I0
        self.tau = tau
        self.tau_s = tau_s
        self.tau_d = tau_d
        # learning-rule parameters
        self.tau_l = tau_l  # learning time constant
        self.A_up = A_up    # amplitude for upstream updates
        self.A_dn = A_dn    # amplitude for downstream updates

    def getI(self, V_train, spike_instants, delta_t):
        """Compute the synaptic current trace.

        V_train : 1 X n_t array (only its length is used here).
        spike_instants : list(arr(num_of_spikes)) — only the first train
            is used.
        delta_t : simulation time step.
        Returns It = 1 X n_t (also stored on self.It).
        """
        num_steps = V_train.shape[1]
        self.It = np.zeros(shape=(1, num_steps))
        delay_steps = int(self.tau_d // delta_t)
        delayed = [train + delay_steps for train in spike_instants]
        first_train = delayed[0]
        for step in range(num_steps):
            # indices of spikes that arrived strictly before this step
            past_idx = np.where((np.array(first_train) < step) == 1)[0]
            past_spikes = np.array(first_train[past_idx])
            if past_spikes.size != 0:
                kernel_sum = self.f(step * delta_t, past_spikes * delta_t)
                self.It[0, step] = self.I0 * self.w * kernel_sum
            else:
                self.It[0, step] = 0
        return self.It

    def f(self, t, t_calc):
        """Sum of difference-of-exponentials kernels evaluated at time t."""
        decay = np.exp(-(t - t_calc) / self.tau)
        rise = np.exp(-(t - t_calc) / self.tau_s)
        return np.sum(decay - rise)

    def weight_update(self, delta_tk, upd_coeff):
        """Scale w by the timing-dependent factor and return the NEW weight.

        upd_coeff : +1 selects the A_up rule, -1 the A_dn rule; any other
            value leaves w untouched.
        Note: unlike PLASTIC_SYNAPSE_A.weight_update, the return value is
        the updated weight itself, not the applied delta.
        """
        timing = np.exp(- delta_tk / self.tau_l)
        if upd_coeff == 1:
            # upstream
            self.w = self.w + self.w * (self.A_up * timing)
        elif upd_coeff == -1:
            self.w = self.w + self.w * (self.A_dn * timing)
        return self.w
'''
using SYNAPSE and SPIKETRAINS:
import matplotlib.pyplot as plt
from SpikeTrains import POISSON_SPIKE_TRAIN, RANDOM_SPIKE_TRAIN
n_out = 1
T = 500*(10**-3)
delta_t = 0.1*(10**-3)
n_t = int(T/delta_t)
ST = POISSON_SPIKE_TRAIN(T=T, delta_t=delta_t, lamb=10, n_out=n_out)
V, SI = ST.V_train, ST.spike_instants
w = 500
I0 = 1*(10**-12)
tau = 15*(10**-3)
tau_s = tau/4
tau_d = 1*(10**-3)
Sy = CONST_SYNAPSE(w, I0, tau, tau_s, tau_d)
I = Sy.getI(V, SI, delta_t)
plt.figure()
plt.suptitle('spike train and synaptic current')
plt.subplot(2,1,1)
plt.plot(list(range(n_t+1)), V[0,:])
plt.xlabel('time')
plt.ylabel('V')
plt.subplot(2,1,2)
plt.plot(list(range(n_t+1)), I[0,:])
plt.xlabel('time')
plt.ylabel('I')
plt.show()
'''
|
# Generated by Django 2.2.24 on 2021-12-21 10:18
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the FeedBack table.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='FeedBack',
            fields=[
                # UUID primary key generated application-side (uuid4)
                ('key', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
                ('email', models.EmailField(default='email', max_length=254)),
                ('rating', models.FloatField()),
                ('message', models.TextField(blank=True, max_length=255)),
                # whether the user agreed to be contacted back
                ('isContact', models.BooleanField(default=False)),
            ],
        ),
    ]
|
from .twitter import TwitterHandler
from .pixiv import PixivHandler
|
from typing import List, Generator, Tuple
from .window import Window, Trackbar
from ..app import Application, Config
from ..backend import cv
from ..frame import AnyFrame
class WindowManager:
    """Manages windows."""

    def __init__(self, windows: List[Window] = None, app: Application = None):
        """
        :param windows: Windows to manage. If not set,
            one window is automatically constructed.
        :param app: ocvproto application object. Automatically constructed if not set.
        """
        self.app = app if app is not None else Application()
        self._windows = windows if windows else [Window()]
        self.bind_trackbar_keys()
        self.app.set_loop_func(self.render)
        self._hooks_bind()

    def _hooks_bind(self):
        """Subscribe config save/load hooks on the application."""
        register = self.app.hook_register
        register('config_save', self.config_update)
        register('config_load', self.config_load)

    @property
    def window(self) -> Window:
        """Default window."""
        return self._windows[0]

    def config_update(self, config: Config):
        """Updates data gathered from managed windows in the given config.

        :param config:
        """
        gathered = {}
        for win, bar in self.iter_trackbars():
            entry = gathered.setdefault(win.name, {'trackvals': {}})
            entry['trackvals'][bar.name] = bar.value
        config.set_data('windows', gathered)

    def config_load(self, config: Config):
        """Updates managed windows using data from the given config.

        :param config:
        """
        stored: dict = config.get_data('windows', {})
        for win, bar in self.iter_trackbars():
            win_entry = stored.get(win.name)
            if not win_entry:
                continue
            trackvals = win_entry.get('trackvals')
            if not trackvals:
                continue
            value = trackvals.get(bar.name)
            if value is not None:
                bar.value = value

    def iter_trackbars(self) -> Generator[Tuple[Window, Trackbar], None, None]:
        """Generator yielding managed windows and trackbars."""
        for win in self._windows:
            yield from ((win, bar) for bar in win.trackbars.values())

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # tear down all OpenCV windows on context exit
        cv.destroyAllWindows()

    def set_frame(self, frame: AnyFrame):
        """Sets frame to be rendered in default window.

        :param frame:
        """
        self.window.set_frame(frame)

    def render(self):
        """Renders managed windows."""
        for win in self._windows:
            win.render()

    def bind_trackbar_keys(self):
        """Bind every trackbar's key handlers on the application."""
        bind = self.app.bind_key
        for _win, bar in self.iter_trackbars():
            for key, func in bar.keys.items():
                bind(key, func)
|
def somar(n, n2):
    """Return the sum of *n* and *n2*."""
    resultado = n + n2
    return resultado
|
import socket

# Bug fixes: connect() needs a bare host name — 'http://www.sapo.pt' is a
# URL and name resolution on it always fails; sockets carry bytes, not str;
# the old Python 2 print statement is now a function call.
TCP_IP = 'www.sapo.pt'
TCP_PORT = 80
BUFFER_SIZE = 1024
MESSAGE = b"Hello, World!"

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    s.connect((TCP_IP, TCP_PORT))
    # sendall() retries partial sends, unlike send()
    s.sendall(MESSAGE)
    data = s.recv(BUFFER_SIZE)
finally:
    # always release the socket, even if connect/send fails
    s.close()
print("received data:", data)
|
import re
from day import Day
from utils.grid import Grid
from utils.point import UniquePoint
from utils.timer import Timer
rx_collapsible = re.compile(r'(?P<start>[(|^])(?P<prefix>[NESW]+)\((?P<contents>[NESW|]+)\)')
class DoorGrid(Grid):
    """Room/door map built from a door-regex (AoC 2018 day 20 style)."""
    # character reported for cells never written
    fallback = '#'
    # compass step -> grid delta (N/S move along y, W/E along x)
    directions = {
        'N': UniquePoint.Y_MINUS,
        'S': UniquePoint.Y_PLUS,
        'W': UniquePoint.X_MINUS,
        'E': UniquePoint.X_PLUS,
    }
    def __init__(self):
        super().__init__()
        # room -> door-distance from the start; filled lazily by _resolve_paths
        self.resolved = {}
    def update_bounds(self, x, y):
        # keep a margin beyond the touched cell
        self.max_x = max(self.max_x, x + 1)
        self.max_y = max(self.max_y, y + 1)
        # NOTE(review): the first assignment uses x/y itself while later
        # ones use x-1/y-1 — asymmetric with the max side; confirm against
        # the Grid base class expectations.
        self.min_x = x if self.min_x is None else min(self.min_x, x - 1)
        self.min_y = y if self.min_y is None else min(self.min_y, y - 1)
    def add_regex(self, rx_input):
        """Draw every room/door described by the regex; mark the start 'X'."""
        start = UniquePoint(1000, 1000)
        self._solve_path(start, rx_input)
        self[start] = 'X'
    def _resolve_paths(self):
        """BFS over doors from the start, recording each room's distance."""
        start = UniquePoint(1000, 1000)
        queue = [start]
        self.resolved = {start: 0}
        while queue:
            point = queue.pop(0)
            current_val = self.resolved.get(point)
            step_val = current_val + 1
            for next_step in self.next_possible(point):
                next_val = self.resolved.get(next_step)
                # relax: unvisited (None) or reached via a longer route
                if next_step != start and (not next_val or next_val > step_val):
                    self.resolved[next_step] = step_val
                    queue.append(next_step)
        return max(self.resolved.values())
    def find_furthest_point(self):
        """Largest door-distance over all reachable rooms."""
        if not self.resolved:
            self._resolve_paths()
        return max(self.resolved.values())
    def get_all_paths_over_length(self, length):
        """Count rooms whose shortest path passes through >= length doors."""
        if not self.resolved:
            self._resolve_paths()
        return len([x for x in self.resolved.values() if x >= length])
    def next_possible(self, current):
        """Rooms adjacent to current through a door ('|' or '-')."""
        next_points = []
        for direction in self.directions.values():
            in_direction = current + direction
            if self[in_direction] in '|-':
                # step over the door to the room beyond it
                next_points.append(in_direction + direction)
        return next_points
    def _move(self, point, direction):
        # carve room, door, room along the direction; return the new room
        self[point] = '.'
        point += self.directions[direction]
        self[point] = '|' if direction in 'WE' else '-'
        point += self.directions[direction]
        self[point] = '.'
        return point
    def _solve_path(self, start, paths):
        """Expand the regex into concrete paths and walk each onto the grid."""
        paths = self._collapse_paths(paths)
        t = Timer()
        print(f'checking out {len(paths)} paths')
        for path in paths:
            location = start
            for step in path:
                location = self._move(location, step)
        size_y = self.max_y - self.min_y
        size_x = self.max_x - self.min_x
        t.next(f'created {size_y} x {size_x} grid')
    @staticmethod
    def _collapse_paths(str_in):
        # at first i tried actually recursively walking the paths,
        # but resolving the subpaths first is waaay more performant
        match = rx_collapsible.search(str_in)
        while match:
            prefix = match.group('prefix')
            contents = match.group('contents').split('|')
            start = match.start()
            end = match.end()
            # splice "prefix(a|b)" into "prefixa|prefixb" and rescan
            str_in = str_in[:start] + match.group('start') + '|'.join([prefix + c for c in contents]) + str_in[end:]
            match = rx_collapsible.search(str_in)
        return str_in[1:-1].split('|')
class Day20(Day):
    # shared between parts: part2 relies on part1 having built the grid
    grid: DoorGrid
    def part1(self):
        """Door-distance to the furthest reachable room."""
        self.grid = DoorGrid()
        self.grid.add_regex(self.input[0])
        return self.grid.find_furthest_point()
    def part2(self):
        """Number of rooms at least 1000 doors away (reuses part1's grid)."""
        return self.grid.get_all_paths_over_length(1000)
|
import os
import pexpect
import rootfs_boot
from lib.installers import install_jmeter
from devices import board, lan, prompt
from devices.common import scp_from
class JMeter(rootfs_boot.RootFSBootTest):
    '''Runs JMeter jmx file from LAN device'''

    # URL or local path of the test plan; subclasses override these
    jmx = "https://jmeter.apache.org/demos/ForEachTest2.jmx"
    shortname = "ForEachTest2"

    def runTest(self):
        """Fetch/copy the jmx onto the LAN device, run it, wait for completion."""
        install_jmeter(lan)
        if self.jmx.startswith('http'):
            lan.sendline('curl %s > test.jmx' % self.jmx)
            lan.expect(prompt)
        else:
            # bug fix: the jmx path was never interpolated into the message
            print("Copying %s to lan device" % self.jmx)
            lan.copy_file_to_server(self.jmx, dst='/root/test.jmx')
        lan.sendline('rm -rf output *.log')
        lan.expect(prompt)
        lan.sendline('mkdir -p output')
        lan.expect(prompt)
        board.collect_stats(stats=['mpstat'])
        lan.sendline('jmeter -n -t test.jmx -l foo.log -e -o output')
        lan.expect_exact('jmeter -n -t test.jmx -l foo.log -e -o output')
        # poll up to 600 * 5s for the run to finish, sampling board health
        for i in range(600):
            if 0 != lan.expect([pexpect.TIMEOUT] + prompt, timeout=5):
                break
            conns = board.get_nf_conntrack_conn_count()
            board.get_proc_vmstat()
            board.touch()
            # after a grace period, few connections means jmeter died
            if i > 100 and conns < 20:
                raise Exception("jmeter is dead/stuck/broke, aborting the run")
        if i == 599:
            raise Exception("jmeter did not have enough time to complete")
        #lan.sendline('rm -rf output')
        #lan.expect(prompt)
        lan.sendline('rm test.jmx')
        lan.expect(prompt)
        self.recover()

    def recover(self):
        """Stop jmeter, scp the report off the LAN device, collect stats."""
        lan.sendcontrol('c')
        lan.expect(prompt)
        # py3-compatible print (was a Python 2 print statement)
        print("Copying files from lan to dir = %s" % self.config.output_dir)
        lan.sendline('readlink -f output/')
        lan.expect('readlink -f output/')
        lan.expect(prompt)
        fname = lan.before.strip()
        scp_from(fname, lan.ipaddr, lan.username, lan.password, lan.port, os.path.join(self.config.output_dir, 'jmeter_%s' % self.shortname))
        # let board settle down
        board.expect(pexpect.TIMEOUT, timeout=30)
        board.parse_stats(dict_to_log=self.logged)
        self.result_message = 'JMeter: DONE, name = %s cpu usage = %s' % (self.shortname, self.logged['mpstat'])
# NOTE(review): these subclasses set `name`, but the base JMeter class reads
# `shortname` for its result message and scp target, so every variant reports
# and archives as "ForEachTest2" — confirm whether `name` or `shortname` is
# the attribute the framework expects here.
class JMeter_10x_10u_5t(JMeter):
    '''Runs JMeter jmx 10x_10u_5t'''
    jmx = os.path.join(os.path.dirname(__file__), 'jmeter/httpreq_10x_10u_5t.jmx')
    name = "httpreq_10x_10u_5t"
class JMeter_1x_9u_5t(JMeter):
    '''Runs JMeter jmx 1x_9u_5t'''
    jmx = os.path.join(os.path.dirname(__file__), 'jmeter/httpreq_1x_9u_5t.jmx')
    name = "httpreq_1x_9u_5t"
class JMeter_20x_9u_1t(JMeter):
    '''Runs JMeter jmx 20x_9u_1t'''
    jmx = os.path.join(os.path.dirname(__file__), 'jmeter/httpreq_20x_9u_1t.jmx')
    name = "httpreq_20x_9u_1t"
class JMeter_20x_9u_1t_300msdelay(JMeter):
    '''Runs JMeter jmx 20x_9u_1t_300msdelay'''
    jmx = os.path.join(os.path.dirname(__file__), 'jmeter/httpreq_20x_9u_1t_300msdelay.jmx')
    name = "httpreq_20x_9u_1t_300msdelay"
class JMeter_20x_9u_1t_500msdelay(JMeter):
    '''Runs JMeter jmx 20x_9u_1t_500msdelay'''
    jmx = os.path.join(os.path.dirname(__file__), 'jmeter/httpreq_20x_9u_1t_500msdelay.jmx')
    name = "httpreq_20x_9u_1t_500msdelay"
class JMeter_20x_9u_1t_1000msdelay(JMeter):
    '''Runs JMeter jmx 20x_9u_1t_1000msdelay'''
    jmx = os.path.join(os.path.dirname(__file__), 'jmeter/httpreq_20x_9u_1t_1000msdelay.jmx')
    name = "httpreq_20x_9u_1t_1000msdelay"
class JMeter_20x_9u_1t_1500msdelay(JMeter):
    '''Runs JMeter jmx 20x_9u_1t_1500msdelay'''
    jmx = os.path.join(os.path.dirname(__file__), 'jmeter/httpreq_20x_9u_1t_1500msdelay.jmx')
    name = "httpreq_20x_9u_1t_1500msdelay"
|
#!/usr/bin/python3
from __future__ import print_function
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging
import tensorflow as tf
tf.get_logger().setLevel('ERROR') # Suppress TensorFlow logging (2)
# Enable GPU dynamic memory allocation
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
import sys
import argparse
import cv2
import numpy as np
from collections import OrderedDict
import time
# make the project sources importable when running from this directory
sys.path.append(os.getcwd() + '/../../src')
from config import cfg
from prior_box import PriorBox
from nms import nms
from utils import decode
from timer import Timer
from yufacedetectnet import YuFaceDetectNet
from yufacedetectnet1 import YuFaceDetectNet1
# Command-line options (thresholds and top-k limits drive the main loop below)
parser = argparse.ArgumentParser(description='Face and Landmark Detection')
# parser.add_argument('-m', '--trained_model', default='weights/yunet_final.pth',
#                     type=str, help='Trained state_dict file path to open')
parser.add_argument('--image_file', default='', type=str, help='the image file to be detected')
parser.add_argument('--confidence_threshold', default=0.50, type=float, help='confidence_threshold')
parser.add_argument('--top_k', default=5000, type=int, help='top_k')
parser.add_argument('--nms_threshold', default=0.3, type=float, help='nms_threshold')
parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
#parser.add_argument('-s', '--show_image', action="store_true", default=False, help='show detection results')
parser.add_argument('--vis_thres', default=0.3, type=float, help='visualization_threshold')
parser.add_argument('--base_layers', default=16, type=int, help='the number of the output of the first layer')
parser.add_argument('--device', default='cuda:0', help='which device the program will run on. cuda:0, cuda:1, ...')
args = parser.parse_args()
# input video, read from the current working directory
vid_path = "facetest.mp4"
@tf.function
def infer(net,img):
    """Run one forward pass; tf.function compiles and caches the graph."""
    loc, conf, iou = net(img)
    return loc, conf, iou
if __name__ == '__main__':
    # width the detector runs at; the height is derived from the source aspect
    img_dim = 320
    """### Keras functional model"""
    # net = YuFaceDetectNet1('train', (180,320,3)) # use this model for SAVEDMODEL conversion. Subclassed model converted to SAVEDMODEL format doesnt work well
    # net.load_weights("checkpoints/adam_net.h5")
    """for conversion to SAVED MODEL run these along with above 2 lines"""
    # net(tf.random.normal((8,320,320,3)))
    # net.save("saved_model",save_format="tf")
    # tf.saved_model.save(net,"saved_model")
    # exit()
    ##*****************************##
    """###Subclassed Model"""
    net = YuFaceDetectNet("test", (320,320,3))
    net.load_weights("subclass/weights")
    """#loading Saved model / TRT (TF-TRT conversion) model"""
    # net = tf.saved_model.load("/home/arm/Projects/LibFaceDetection/My work/saved_model")
    # net = tf.saved_model.load("/home/arm/Projects/LibFaceDetection/My work/trt")
    # print("Printing net...")
    # print(net.weights)
    # net.summary()
    print('Finished loading model!')
    _t = {'forward_pass': Timer(), 'misc': Timer()}
    cap = cv2.VideoCapture(vid_path) #27sec
    # cap = cv2.VideoCapture("filesrc location=\"/path/to/video.mp4\" decodebin ! videoconvert ! autovideosink",cv2.CAP_GSTREAMER)
    # cap = cv2.VideoCapture("rtspsrc location=rtsp://admin:abcd1234@10.0.0.236 latency=0 ! rtph264depay ! avdec_h264 ! videoconvert ! appsink",cv2.CAP_GSTREAMER)
    # read one frame up-front only to learn the source resolution
    (grabbed, frame) = cap.read()
    im_height1, im_width1, _ = frame.shape
    # used to record the time when we processed last frame
    prev_frame_time = 0
    # used to record the time at which we processed current frame
    new_frame_time = 0
    FPS=0
    tot=0
    im_width = img_dim
    im_height = int((im_width*im_height1)/im_width1)
    # im_height = img_dim
    priorbox = PriorBox(cfg, image_size=(im_height,im_width))
    priors = priorbox.forward()
    # priors = priors.to(device)
    # exit()
    # scale factors mapping normalized boxes back to detector pixels
    scale = np.array([im_width, im_height, im_width, im_height])
    # im_width, im_height, im_width, im_height,
    # im_width, im_height, im_width, im_height,
    # im_width, im_height ])
    vid_start=time.time()
    while True:
        new_frame_time = time.time()
        preproc_start = time.time()
        ret, img_raw = cap.read()
        if ret:
            # resize to detector resolution and add a batch dimension
            img = cv2.resize(img_raw,(im_width, im_height))
            img = tf.convert_to_tensor(img,dtype="float32")
            img = img[tf.newaxis, ...]
            preproc_end = time.time()
            print("preproc:",(preproc_end-preproc_start)*1000)
            _t['forward_pass'].tic()
            loc, conf, iou = infer(net,img) # forward pass
            print("Inference_Time",_t['forward_pass'].toc()*1000)
            _t['misc'].tic()
            prior_data = priors
            boxes = decode(tf.squeeze(loc,axis=0), prior_data, cfg['variance'])
            boxes = boxes * scale
            cls_scores = tf.squeeze(conf,axis=0).numpy()[:, 1]
            """ use these 2 lines if using functional """
            # cls_scores = tf.keras.layers.Softmax(axis=-1)(conf)
            # cls_scores = tf.squeeze(cls_scores,axis=0).numpy()[:, 1]
            iou_scores = tf.squeeze(iou,axis=0).numpy()[:, 0]
            # clamp here for the compatibility for ONNX
            _idx = np.where(iou_scores < 0.)
            iou_scores[_idx] = 0.
            _idx = np.where(iou_scores > 1.)
            iou_scores[_idx] = 1.
            # final score blends classification and IoU confidence
            scores = np.sqrt(cls_scores * iou_scores)
            # ignore low scores
            inds = np.where(scores > args.confidence_threshold)[0]
            boxes = boxes[inds]
            scores = scores[inds]
            # keep top-K before NMS
            order = scores.argsort()[::-1][:args.top_k]
            boxes = boxes[order]
            scores = scores[order]
            # print('there are', len(boxes), 'candidates')
            # do NMS
            dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
            selected_idx = np.array([0,1,2,3,4])
            keep = nms(dets[:,selected_idx], args.nms_threshold)
            dets = dets[keep, :]
            # keep top-K faster NMS
            dets = dets[:args.keep_top_k, :]
            # show image
            for b in dets:
                if b[4] < args.vis_thres:
                    continue
                text = "{:.4f}".format(b[4])
                b = list(map(int, b))
                # scale detector-space boxes back to the original frame size
                cv2.rectangle(img_raw, (int(b[0]*(im_width1/im_width)), int(b[1]*(im_height1/im_height))),
                              (int(b[2]*(im_width1/im_width)), int(b[3]*(im_height1/im_height))), (0, 255, 0), 2)
                cx = b[0]
                cy = b[1] + 12
                cv2.putText(img_raw, text, (int(cx*(im_width1/im_width)), int(cy*(im_height1/im_height))),
                            cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
            # NOTE(review): on the first frame prev_frame_time is 0, so this
            # fps value is meaningless (and could divide by zero if two reads
            # land on the same clock tick) — confirm intended behavior.
            total_time=new_frame_time-prev_frame_time
            fps = 1/(new_frame_time-prev_frame_time)
            prev_frame_time = new_frame_time
            FPS = FPS+fps
            tot = tot+1
            # converting the fps into integer
            fps = str(round(fps,1))
            cv2.putText(img_raw,fps, (15,40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0),2)
            cv2.namedWindow('res', cv2.WINDOW_NORMAL )
            cv2.imshow('res', img_raw)
            # cv2.resizeWindow('res', im_width, im_height)
            # out.write(img_raw)
            print("misc_Time",_t['misc'].toc()*1000)
            print("Total-Time",total_time*1000,"\n")
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    vid_end=time.time()
    print("AVG FPS:",FPS/tot)
    print("Total_Process:",vid_end-vid_start)
    # tf.saved_model.save(net,"saved_model")
    # cap.release()
    cv2.destroyAllWindows()
|
import usb.core
import usb.util
import threading
def is_usb_printer(dev):
    """Return True if *dev* is a printer: device class 7, or any
    configuration exposing a class-7 interface.

    Used as a ``custom_match`` predicate for ``usb.core.find``.
    """
    if dev.bDeviceClass == 7:
        return True
    for cfg in dev:
        if usb.util.find_descriptor(cfg, bInterfaceClass=7) is not None:
            return True
    # explicit False instead of the original implicit None fall-through
    return False
class PyUSBBackend():
    """Thin pyusb wrapper exposing raw bulk read/write to a printer device."""
    def __init__(self, dev):
        # dev: a usb.core.Device, expected to be configured already
        self.dev = dev
        # NOTE(review): this lock is created but never acquired in this class —
        # presumably callers serialize access with it; confirm intended use.
        self.lock = threading.Lock()
    def __enter__(self):
        """Enter"""
        return self
    def __exit__(self, exc_type, exc_value, trace):
        """Exit + close the device"""
        usb.util.dispose_resources(self.dev)
    @classmethod
    def auto(cls):
        # find the first device that looks like a printer and activate its
        # first configuration
        dev = usb.core.find(custom_match=is_usb_printer)
        if dev is None:
            raise OSError('Device not found')
        dev.set_configuration(dev.configurations()[0])
        return cls(dev)
    def write(self, data: bytes):
        # bulk write to endpoint 0x02 (OUT)
        self.dev.write(0x2, data)
    def read(self, count: int) -> bytes:
        # bulk read from endpoint 0x81 (IN: 0x80 direction bit set)
        return self.dev.read(0x81, count)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v2/proto/services/change_status_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v2.proto.resources import change_status_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_change__status__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
# File descriptor for change_status_service.proto (protoc output -- do not
# edit by hand). serialized_pb is the compiled binary form of the .proto file;
# serialized_options carries the per-language package/namespace options.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='google/ads/googleads_v2/proto/services/change_status_service.proto',
  package='google.ads.googleads.v2.services',
  syntax='proto3',
  serialized_options=_b('\n$com.google.ads.googleads.v2.servicesB\030ChangeStatusServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v2/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V2.Services\312\002 Google\\Ads\\GoogleAds\\V2\\Services\352\002$Google::Ads::GoogleAds::V2::Services'),
  serialized_pb=_b('\nBgoogle/ads/googleads_v2/proto/services/change_status_service.proto\x12 google.ads.googleads.v2.services\x1a;google/ads/googleads_v2/proto/resources/change_status.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\"/\n\x16GetChangeStatusRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xe9\x01\n\x13\x43hangeStatusService\x12\xb4\x01\n\x0fGetChangeStatus\x12\x38.google.ads.googleads.v2.services.GetChangeStatusRequest\x1a/.google.ads.googleads.v2.resources.ChangeStatus\"6\x82\xd3\xe4\x93\x02\x30\x12./v2/{resource_name=customers/*/changeStatus/*}\x1a\x1b\xca\x41\x18googleads.googleapis.comB\xff\x01\n$com.google.ads.googleads.v2.servicesB\x18\x43hangeStatusServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v2/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V2.Services\xca\x02 Google\\Ads\\GoogleAds\\V2\\Services\xea\x02$Google::Ads::GoogleAds::V2::Servicesb\x06proto3')
  ,
  dependencies=[google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_change__status__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,])
_GETCHANGESTATUSREQUEST = _descriptor.Descriptor(
name='GetChangeStatusRequest',
full_name='google.ads.googleads.v2.services.GetChangeStatusRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v2.services.GetChangeStatusRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=220,
serialized_end=267,
)
DESCRIPTOR.message_types_by_name['GetChangeStatusRequest'] = _GETCHANGESTATUSREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetChangeStatusRequest = _reflection.GeneratedProtocolMessageType('GetChangeStatusRequest', (_message.Message,), dict(
DESCRIPTOR = _GETCHANGESTATUSREQUEST,
__module__ = 'google.ads.googleads_v2.proto.services.change_status_service_pb2'
,
__doc__ = """Request message for
'[ChangeStatusService.GetChangeStatus][google.ads.googleads.v2.services.ChangeStatusService.GetChangeStatus]'.
Attributes:
resource_name:
The resource name of the change status to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.services.GetChangeStatusRequest)
))
_sym_db.RegisterMessage(GetChangeStatusRequest)
DESCRIPTOR._options = None
_CHANGESTATUSSERVICE = _descriptor.ServiceDescriptor(
name='ChangeStatusService',
full_name='google.ads.googleads.v2.services.ChangeStatusService',
file=DESCRIPTOR,
index=0,
serialized_options=_b('\312A\030googleads.googleapis.com'),
serialized_start=270,
serialized_end=503,
methods=[
_descriptor.MethodDescriptor(
name='GetChangeStatus',
full_name='google.ads.googleads.v2.services.ChangeStatusService.GetChangeStatus',
index=0,
containing_service=None,
input_type=_GETCHANGESTATUSREQUEST,
output_type=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_change__status__pb2._CHANGESTATUS,
serialized_options=_b('\202\323\344\223\0020\022./v2/{resource_name=customers/*/changeStatus/*}'),
),
])
_sym_db.RegisterServiceDescriptor(_CHANGESTATUSSERVICE)
DESCRIPTOR.services_by_name['ChangeStatusService'] = _CHANGESTATUSSERVICE
# @@protoc_insertion_point(module_scope)
|
"""
"""
import os
from django.urls import reverse_lazy
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
BASE_URL = '/'
HOST = 'http://127.0.0.1' # This is for links in emails, must not end with a slash
# If WSGIScriptAlias of your Apache config already implements the base url, set to False
APPLY_BASE_URL_TO_URL_PATTERNS = True
# Application definition
INSTALLED_APPS = [
# 'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webinterface.apps.WebinterfaceConfig',
'bootstrap3',
'crispy_forms',
'coverage',
'rest_framework'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cleansys.urls'
LOGIN_URL = reverse_lazy('webinterface:login-by-click')
LOGIN_REDIRECT_URL = reverse_lazy('webinterface:cleaner-no-page')
LOGOUT_REDIRECT_URL = reverse_lazy('webinterface:welcome')
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        # Look up templates inside each installed app's templates/ directory.
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'cleansys.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Single-file SQLite database stored next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
# German UI language; times stored as timezone-aware UTC (USE_TZ).
LANGUAGE_CODE = 'de-de'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Directory that receives the rotating log files written by the 'file' handler.
LOGGING_PATH = os.path.join(BASE_DIR, 'logs')
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        # Lets 'mail_admins' fire only when DEBUG is False (production).
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'verbose': {
            'format': '{levelname} {asctime} {module} {process:d} {thread:d} {message}',
            'style': '{',
        },
        'file_format': {
            'format': '{levelname} {asctime} {message}',
            'style': '{',
        },
        'simple': {
            'format': '{levelname} {message}',
            'style': '{',
        },
    },
    'handlers': {
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
        },
        'file': {  # Schedule model builds its loggers with this handler
            'level': 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
            # Rotate at ~1 MB, keeping 3 backups of general.log.
            'maxBytes': 1000000,
            'backupCount': 3,
            'filename': os.path.join(LOGGING_PATH, 'general.log'),
            'formatter': 'file_format',
            'encoding': 'utf8',
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
            'include_html': True,
            'filters': ['require_debug_false'],
        }
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'propagate': True,
        },
        'file_logger': {
            'handlers': ['file'],
            'level': 'INFO',
        }
    }
}  # Further logging configuration is added in dev_settings and prod_settings
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
# URLs always use forward slashes; os.path.join would emit '\' on Windows,
# producing invalid URLs, so URL values are built with posixpath instead.
import posixpath

STATIC_DIR = 'static/'
# URL prefix for static assets (e.g. '/static/'); ends with a slash because
# STATIC_DIR carries a trailing slash.
STATIC_URL = posixpath.join(BASE_URL, STATIC_DIR)
# Filesystem location for collectstatic output; os.path.join is correct here.
STATIC_ROOT = os.path.join(BASE_DIR, STATIC_DIR)
MEDIA_DIR = 'media/'
MEDIA_URL = posixpath.join(BASE_URL, MEDIA_DIR)
MEDIA_ROOT = os.path.join(BASE_DIR, MEDIA_DIR)
# Plots are written straight into the media root.
PLOT_PATH = MEDIA_ROOT
CLEANER_ANALYTICS_FILE = os.path.join(PLOT_PATH, 'cleaner_analytics.html')
CRISPY_TEMPLATE_PACK = 'bootstrap3'
BOOTSTRAP3 = {
    # The URL to the jQuery JavaScript file. STATIC_URL already ends with a
    # slash, so no extra leading '/' (the original concatenation produced a
    # double slash, '/static//webinterface/...').
    'jquery_url': STATIC_URL + 'webinterface/jquery/jquery.min.js',
    # The Bootstrap base URL
    'base_url': STATIC_URL + 'webinterface/bootstrap-3.4.1-dist/',
    # The complete URL to the Bootstrap CSS file (None means no theme)
    # 'theme_url': STATIC_URL + 'webinterface/bootstrap-3.4.1-dist/css/bootstrap-theme.css',
}
# Django REST framework configuration.
REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ],
    # Page-number pagination with 10 items per page for all list endpoints.
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 10
}
|
"""Delete the 'Spinach Ice Cream' menu item from the restaurant database."""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from database_setup import Base, Restaurant, MenuItem

# Bind the declarative models to the SQLite database and open a session.
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()

# How to use a session:
#   new_entry = ClassName(property="value", ...)
#   session.add(new_entry)
#   session.commit()

# one() raises if zero or multiple rows match, so we know we have exactly
# the row we intend to delete.
spinach = session.query(MenuItem).filter_by(name="Spinach Ice Cream").one()
# Check that the item belongs to the expected restaurant before deleting.
# Fix: the original used a Python 2 print statement, which is a SyntaxError
# under Python 3.
print(spinach.restaurant.name)
session.delete(spinach)
session.commit()
# -*- coding: utf-8 -*-
import dash_core_components as dcc
import dash_daq as daq
import dash_html_components as html
from dash import dash
from dash.dependencies import Input, Output, State
from zvt.contract import Mixin
from zvt.contract import zvt_context, IntervalLevel
from zvt.contract.api import get_entities, get_schema_by_name, get_schema_columns
from zvt.contract.drawer import StackedDrawer
from zvt.ui import zvt_app
from zvt.utils import pd_is_not_null
def factor_layout():
    """Build the Dash layout for the factor page.

    Left column ("three columns card"): the user controls -- dropdowns for
    entity type, code, interval levels, factor, related/all data switch,
    schema and schema columns. Right column ("nine columns card-left"): an
    empty container ('factor-details') filled in by the callbacks below.
    """
    layout = html.Div(
        [
            # controls
            html.Div(
                className="three columns card",
                children=[
                    html.Div(
                        className="bg-white user-control",
                        children=[
                            # select entity_type
                            html.Div(
                                className="padding-top-bot",
                                children=[
                                    html.H6("select entity type:"),
                                    dcc.Dropdown(id='entity-type-selector',
                                                 placeholder='select entity type',
                                                 options=[{'label': name, 'value': name} for name in
                                                          zvt_context.entity_schema_map.keys()],
                                                 value='stock')
                                ],
                            ),
                            # select code
                            html.Div(
                                className="padding-top-bot",
                                children=[
                                    html.H6("select code:"),
                                    dcc.Dropdown(id='code-selector',
                                                 placeholder='select code')
                                ],
                            ),
                            # select levels
                            html.Div(
                                className="padding-top-bot",
                                children=[
                                    html.H6("select levels:"),
                                    dcc.Dropdown(
                                        id='levels-selector',
                                        options=[{'label': level.name, 'value': level.value} for level in
                                                 (IntervalLevel.LEVEL_1WEEK, IntervalLevel.LEVEL_1DAY)],
                                        value='1d',
                                        multi=True
                                    )
                                ],
                            ),
                            # select factor
                            html.Div(
                                className="padding-top-bot",
                                children=[
                                    html.H6("select factor:"),
                                    dcc.Dropdown(id='factor-selector',
                                                 placeholder='select factor',
                                                 options=[{'label': name, 'value': name} for name in
                                                          zvt_context.factor_cls_registry.keys()],
                                                 value='TechnicalFactor')
                                ]
                            ),
                            # select data
                            html.Div(
                                children=[
                                    html.Div(
                                        [
                                            html.H6("related/all data to show in sub graph",
                                                    style={"display": "inline-block"}),
                                            daq.BooleanSwitch(
                                                id='data-switch',
                                                on=True,
                                                style={"display": "inline-block",
                                                       "float": "right",
                                                       "vertical-align": "middle",
                                                       "padding": "8px"}
                                            ),
                                        ],
                                    ),
                                    dcc.Dropdown(id='data-selector', placeholder='schema')
                                ],
                                style={"padding-top": "12px"}
                            ),
                            # select properties
                            html.Div(
                                children=[
                                    dcc.Dropdown(id='schema-column-selector', placeholder='properties')
                                ],
                                style={"padding-top": "6px"}
                            ),
                        ])
                ]),
            # Graph
            html.Div(
                className="nine columns card-left",
                children=[
                    html.Div(
                        id='factor-details'
                    )
                ])
        ]
    )
    return layout
@zvt_app.callback(
    [Output('data-selector', 'options'),
     Output('code-selector', 'options')],
    [Input('entity-type-selector', 'value'), Input('data-switch', 'on')])
def update_code_selector(entity_type, related):
    """Refresh the schema and code dropdown options for an entity type.

    When *related* is on, only schemas registered for this entity type are
    offered; otherwise every known schema is listed.
    """
    if entity_type is None:
        raise dash.PreventUpdate()

    if related:
        schemas = zvt_context.entity_map_schemas.get(entity_type)
    else:
        schemas = zvt_context.schemas

    schema_options = [{'label': schema.__name__, 'value': schema.__name__}
                      for schema in schemas]
    codes = get_entities(entity_type=entity_type, columns=['code']).index
    code_options = [{'label': code, 'value': code} for code in codes]
    return schema_options, code_options
@zvt_app.callback(
    Output('schema-column-selector', 'options'),
    [Input('data-selector', 'value')])
def update_column_selector(schema_name):
    """Populate the column dropdown from the currently selected schema."""
    if not schema_name:
        raise dash.PreventUpdate()
    columns = get_schema_columns(schema=get_schema_by_name(name=schema_name))
    return [{'label': column, 'value': column} for column in columns]
@zvt_app.callback(
    Output('factor-details', 'children'),
    [Input('factor-selector', 'value'),
     Input('entity-type-selector', 'value'),
     Input('code-selector', 'value'),
     Input('levels-selector', 'value'),
     Input('schema-column-selector', 'value')],
    state=[State('data-selector', 'value')])
def update_factor_details(factor, entity_type, code, levels, columns, schema_name):
    """Render the factor kline graph(s) for the selected entity/code/levels.

    With two or more levels a stacked drawer is built (one drawer per
    level); with a single level a plain drawer is used, optionally with
    extra schema columns shown as a sub-dataframe.
    """
    if factor and entity_type and code and levels:
        sub_df = None
        if columns:
            if type(columns) == str:
                columns = [columns]
            # entity_id/timestamp are always needed to align the sub graph.
            columns = columns + ['entity_id', 'timestamp']
            # NOTE(review): assumes schema_name (State) is set whenever
            # columns were selected -- get_schema_by_name(name=None) would
            # fail otherwise; confirm the UI guarantees this ordering.
            schema: Mixin = get_schema_by_name(name=schema_name)
            sub_df = schema.query_data(code=code, columns=columns)
        if type(levels) is list and len(levels) >= 2:
            # Lexical sort; for the offered values '1d' < '1wk', which
            # presumably matches the order StackedDrawer expects -- confirm.
            levels.sort()
            drawers = []
            for level in levels:
                drawers.append(zvt_context.factor_cls_registry[factor](
                    entity_schema=zvt_context.entity_schema_map[entity_type],
                    level=level, codes=[code]).drawer())
            stacked = StackedDrawer(*drawers)
            return dcc.Graph(
                id=f'{factor}-{entity_type}-{code}',
                figure=stacked.draw_kline(show=False, height=900))
        else:
            # Single level: 'levels' may arrive as a one-element list or a
            # bare value depending on the multi-select state.
            if type(levels) is list:
                level = levels[0]
            else:
                level = levels
            drawer = zvt_context.factor_cls_registry[factor](entity_schema=zvt_context.entity_schema_map[entity_type],
                                                             level=level,
                                                             codes=[code],
                                                             need_persist=False).drawer()
            if pd_is_not_null(sub_df):
                drawer.add_sub_df(sub_df)
            return dcc.Graph(
                id=f'{factor}-{entity_type}-{code}',
                figure=drawer.draw_kline(show=False, height=800))
    raise dash.PreventUpdate()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
def migrate_default_project_from_extra_json(meta, migrate_engine):
    """Move default_project_id out of each user's 'extra' JSON blob.

    Pops 'default_project_id' (or the legacy v2 keys 'tenantId' /
    'tenant_id') out of the JSON 'extra' column and writes the value into
    the dedicated default_project_id column added by upgrade(). Rows whose
    'extra' is not valid JSON are left untouched.
    """
    user_table = sql.Table('user', meta, autoload=True)
    user_list = user_table.select().execute()
    session = sessionmaker(bind=migrate_engine)()
    for user in user_list:
        try:
            data = json.loads(user.extra)
            # Pop every known spelling so none of them linger in 'extra'.
            default_project_id = data.pop('default_project_id', None)
            v2_tenant_id = data.pop('tenantId', None)
            alt_v2_tenant_id = data.pop('tenant_id', None)
        except (ValueError, TypeError):
            # NOTE(morganfainberg): Somehow we have non-json data here. This
            # is a broken user, but it was broken beforehand. Cleaning it up
            # is not in the scope of this migration.
            continue
        values = {}
        # Priority order: modern key first, then the legacy v2 keys.
        if default_project_id is not None:
            values['default_project_id'] = default_project_id
        elif v2_tenant_id is not None:
            values['default_project_id'] = v2_tenant_id
        elif alt_v2_tenant_id is not None:
            values['default_project_id'] = alt_v2_tenant_id
        if 'default_project_id' in values:
            # Only rewrite 'extra' (with the keys popped) when a project id
            # was actually found for this row.
            values['extra'] = json.dumps(data)
            update = user_table.update().where(
                user_table.c.id == user['id']).values(values)
            migrate_engine.execute(update)
    session.commit()
    session.close()
def migrate_default_project_to_extra_json(meta, migrate_engine):
    """Copy default_project_id back into each user's 'extra' JSON blob.

    Inverse of migrate_default_project_from_extra_json(), used on
    downgrade. Rows whose 'extra' is not valid JSON are left untouched.
    """
    user_table = sql.Table('user', meta, autoload=True)
    session = sessionmaker(bind=migrate_engine)()
    for row in user_table.select().execute():
        try:
            extra = json.loads(row.extra)
        except (ValueError, TypeError):
            # NOTE(morganfainberg): Somehow we have non-json data here. This
            # is a broken user, but it was broken beforehand. Cleaning it up
            # is not in the scope of this migration.
            continue
        # NOTE(morganfainberg): We don't really know what the original 'extra'
        # property was here. Populate all of the possible variants we may have
        # originally used.
        if row.default_project_id is not None:
            extra['default_project_id'] = row.default_project_id
            extra['tenantId'] = row.default_project_id
            extra['tenant_id'] = row.default_project_id
        statement = user_table.update().where(
            user_table.c.id == row.id).values({'extra': json.dumps(extra)})
        migrate_engine.execute(statement)
    session.commit()
    session.close()
def upgrade(migrate_engine):
    """Add user.default_project_id and populate it from the extra JSON."""
    meta = sql.MetaData()
    meta.bind = migrate_engine
    users = sql.Table('user', meta, autoload=True)
    users.create_column(sql.Column('default_project_id', sql.String(64)))
    migrate_default_project_from_extra_json(meta, migrate_engine)
def downgrade(migrate_engine):
    """Fold default_project_id back into the extra JSON, then drop the column."""
    meta = sql.MetaData()
    meta.bind = migrate_engine
    migrate_default_project_to_extra_json(meta, migrate_engine)
    users = sql.Table('user', meta, autoload=True)
    users.drop_column('default_project_id')
|
from coalib.bearlib.aspects import Root, Taste
# Root of the metadata aspect tree: everything below describes artifacts
# associated with, but outside of, the source files themselves.
@Root.subaspect
class Metadata:
    """
    This describes any aspect that is related to metadata that is not
    inside your source code.
    """
@Metadata.subaspect
class CommitMessage:
    """
    Your commit message is important documentation associated with your
    source code. It can help you to identify bugs (e.g. through
    `git bisect`) or find missing information about unknown source code
    (through `git blame`).
    Commit messages are also sometimes used to generate - or write
    manually - release notes.
    """
@CommitMessage.subaspect
class Emptiness:
    """
    Your commit message serves as important documentation for your source
    code.
    """
    # 'docs' holds documentation metadata consumed by the aspect framework
    # (example, importance, fix suggestions), not executable behaviour.
    class docs:
        example = '(no text at all)'
        example_language = 'English'
        importance_reason = """
        An empty commit message shows the lack of documentation for your
        change.
        """
        fix_suggestions = 'Write a commit message.'
@CommitMessage.subaspect
class Shortlog:
    """
    Your commit shortlog is the first line of your commit message. It is
    the most crucial part and summarizes the change in the shortest possible
    manner.
    """
@Shortlog.subaspect
class ColonExistence:
    """
    Some projects force to use colons in the commit message shortlog
    (first line).
    """
    # Documentation metadata consumed by the aspect framework.
    class docs:
        example = """
        FIX: Describe change further
        context: Describe change further
        """
        example_language = 'English'
        importance_reason = """
        The colon can be a useful separator for a context (e.g. a filename) so
        the commit message makes more sense to the reader or a classification
        (e.g. FIX, ...) or others. Some projects prefer not using colons
        specifically: consistency is key.
        """
        fix_suggestions = """
        Add or remove the colon according to the commit message guidelines.
        """
    # User-configurable setting: whether the colon is required (True) or
    # forbidden (False).
    shortlog_colon = Taste[bool](
        'Whether or not the shortlog has to contain a colon.',
        (True, False), default=True)
@Shortlog.subaspect
class TrailingPeriod:
    """
    Some projects force not to use trailing periods in the commit
    message shortlog (first line).
    """
    # Documentation metadata consumed by the aspect framework.
    class docs:
        example = """
        Describe change.
        Describe change
        """
        example_language = 'English'
        importance_reason = """
        Consistency is key to make messages more readable. Removing a trailing
        period can also make the message shorter by a character.
        """
        fix_suggestions = """
        Add or remove the trailing period according to the commit message
        guidelines.
        """
    # User-configurable setting: whether a trailing period is required (True)
    # or forbidden (False).
    shortlog_period = Taste[bool](
        'Whether or not the shortlog has to contain a trailing period.',
        (True, False), default=False)
@Shortlog.subaspect
class Tense:
    """
    Most projects have a convention on which tense to use in the commit
    shortlog (the first line of the commit message).
    """
    # Documentation metadata consumed by the aspect framework.
    class docs:
        example = """
        Add file
        Adding file
        Added file
        """
        example_language = 'English'
        importance_reason = """
        Consistency is key to make messages more readable.
        """
        fix_suggestions = """
        Rephrase the shortlog into the right tense.
        """
    # User-configurable setting: the expected tense of the shortlog.
    shortlog_tense = Taste[str](
        'The tense of the shortlog.',
        ('imperative', 'present continuous', 'past'),
        default='imperative')
@Shortlog.subaspect
class Length:
    """
    The length of your commit message shortlog (first line).
    """
    # Documentation metadata consumed by the aspect framework.
    class docs:
        example = """
        Some people just write very long commit messages. Too long. "
        Even full sentences. And more of them, too!
        """
        example_language = 'English'
        importance_reason = """
        A good commit message should be quick to read and concise. Also, git
        and platforms like GitHub do cut away everything beyond 72, sometimes
        even 50 characters making any longer message unreadable.
        """
        fix_suggestions = """
        Try to compress your message:
        - Using imperative tense usually saves a character or two
        - Omitting a trailing period saves another character
        - Leave out unneeded words or details
        - Use common abbreviations like w/, w/o or &.
        """
    # User-configurable setting: maximum shortlog length in characters.
    max_shortlog_length = Taste[int](
        'The maximal number of characters the shortlog may contain.',
        (50, 72, 80), default=72)
@Shortlog.subaspect
class FirstCharacter:
    """
    The first character of your commit message shortlog (first line) usually
    should be upper or lower case consistently.
    If the commit message contains a colon, only the first character after
    the colon will be checked.
    """
    # Documentation metadata consumed by the aspect framework.
    class docs:
        example = """
        Add coverage pragma
        Compatability: Add coverage pragma
        add coverage pragma
        Compatability: add coverage pragma
        """
        example_language = 'English'
        importance_reason = """
        Consistent commit messages are easier to read through.
        """
        fix_suggestions = """
        Convert your first character to upper/lower case. If your message starts
        with an identifier, consider rephrasing. Usually starting with a verb is
        a good idea.
        """
    # User-configurable setting: require an upper-case (True) or lower-case
    # (False) first character.
    shortlog_starts_upper_case = Taste[bool](
        'Whether or not the shortlog (first line) of a commit message should '
        'start with an upper case letter consistently.',
        (True, False), default=True)
@CommitMessage.subaspect
class Body:
    """
    Your commit body may contain an elaborate description of your commit.
    """
@Body.subaspect
class Existence:
    """
    Forces the commit message body to exist (nonempty).
    """
    # Documentation metadata consumed by the aspect framework.
    class docs:
        example = """
        aspects: Add CommitMessage.Body
        """
        example_language = 'English'
        importance_reason = """
        Having a nonempty commit body is important if you consistently want
        elaborate documentation on all commits.
        """
        fix_suggestions = """
        Write a commit message with a body.
        """
@Body.subaspect
class Length:
    """
    The length of your commit message body lines.
    """
    # Documentation metadata consumed by the aspect framework.
    class docs:
        example = """
        Some people just write very long commit messages. Too long.
        Way too much actually. If they would just break their lines!
        """
        example_language = 'English'
        importance_reason = """
        Git and platforms like GitHub usually break everything beyond 72
        characters, making a message containing longer lines hard to read.
        """
        fix_suggestions = """
        Simply break your lines right before you hit the border.
        """
    # User-configurable setting: maximum body line length in characters.
    max_body_length = Taste[int](
        'The maximal number of characters the body may contain in one line. '
        'The newline character at each line end does not count to that length.',
        (50, 72, 80), default=72)
|
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from towhee._types import Image
def to_image_color(img: Image, target_mode: str):
    """Convert a towhee Image between color spaces (e.g. BGR <-> GRAY, BGR <-> HSV).

    Returns the input unchanged when it has no ``mode`` attribute or is
    already in *target_mode*; raises ValueError when OpenCV offers no
    conversion flag for the requested pair of modes.
    """
    # pylint: disable=import-outside-toplevel
    from towhee.utils.cv2_utils import cv2

    if not hasattr(img, 'mode') or img.mode == target_mode:
        return img

    # OpenCV conversion flags are named COLOR_<SRC>2<DST>, e.g. COLOR_BGR2GRAY.
    flag_name = 'COLOR_' + img.mode.upper() + '2' + target_mode.upper()
    flag = getattr(cv2, flag_name, None)
    if flag is None:
        raise ValueError(f'Can not convert image from {img.mode} to {target_mode}.')
    return Image(cv2.cvtColor(img, flag), target_mode.upper())
def from_pil(pil_img):
    '''
    Convert a PIL.Image.Image into towhee.types.Image.
    Args:
        pil_img (`PIL.Image.Image`):
            A PIL image.
    Returns:
        (`towhee.types.Image`)
            The image wrapped as towhee Image, carrying over the PIL
            mode string unchanged.
    '''
    # pylint: disable=import-outside-toplevel
    import numpy as np
    # np.array copies the pixel data out of the PIL image.
    return Image(np.array(pil_img), pil_img.mode)
def to_pil(img: Image):
    """Convert a towhee.types.Image into a PIL.Image.Image.

    Args:
        img (`towhee.types.Image`):
            A towhee image; its ``mode`` string is passed through to PIL.
    Returns (`PIL.Image.Image`):
        The equivalent PIL image.
    """
    # pylint: disable=import-outside-toplevel
    from PIL import Image as PILImage

    return PILImage.fromarray(img, img.mode)
|
import argparse
def parse_arg():
    """Parse command-line arguments for the subtitle-to-tracklist tool.

    Returns:
        tuple: (subtitle_path, track_list_path, prefix) where spaces in the
        prefix are replaced by underscores.
    """
    parser = argparse.ArgumentParser()
    # required=True: the original crashed with AttributeError on
    # args.prefix.replace (or later on open(None)) when an option was
    # omitted; argparse now reports a clean usage error instead.
    parser.add_argument("-s", "--subtitle", required=True,
                        help="subtitle path")
    parser.add_argument("-t", "--tracks", required=True,
                        help="track list")
    parser.add_argument("-p", "--prefix", required=True,
                        help="prefix")
    args = parser.parse_args()
    return args.subtitle, args.tracks, args.prefix.replace(' ', '_')
def write_track_list(tracks, track_list):
    """Write the track entries to *track_list*, one entry per line."""
    content = '\n'.join(tracks)
    with open(track_list, 'w') as out:
        out.write(content)
def get_filename_title(prefix, line):
    """Derive a (filename, title) pair from one chapter line.

    The title is the stripped line with spaces replaced by underscores;
    the filename is the prefix joined to the lower-cased title.
    """
    title = line.strip().replace(' ', '_')
    filename = '{}_{}'.format(prefix, title.lower().replace(' ', '_'))
    return filename, title
def append_track(tracks, start, line_timing, filename, current_chapter):
    """Format one track entry, add it to *tracks*, and echo it to stdout."""
    entry = ' '.join((start, line_timing, filename, current_chapter))
    tracks.append(entry)
    print(f'track: {entry}')
def read_srt(src):
    """Parse an SRT subtitle file.

    Returns a list of (index, timing, content) string tuples, one per
    subtitle block. Lines starting with '#' are skipped; a blank line
    resets the block parser.
    """
    segments = []
    with open(src, 'r') as srt_file:
        field = 0        # position within the current subtitle block
        index = 0
        timing = ''
        for raw in srt_file:
            # skip comment lines
            if raw.startswith('#'):
                continue
            # a blank line terminates the current block
            if len(raw) <= 1:
                field = 0
                continue
            field += 1
            if field == 1:
                # sequence number line
                index = raw.strip()
            elif field == 2:
                # "start --> end" timing line
                timing = raw.strip()
            else:
                # subtitle text line: emit the block and reset
                segments.append((index, timing, raw.strip()))
                field = 0
    return segments
def transform_tracks(srt_segments, prefix):
    """Turn parsed SRT segments into chapter track entries.

    Only segments whose content starts with 'Chapter ' delimit tracks.
    Everything before the first real chapter is recorded under the
    pseudo-chapter 'begin'; the final chapter is closed at the timing of
    the last subtitle segment.
    """
    tracks = []
    start = '00:00:00'
    filename = ''
    current_chapter = ''
    for _, seg_time, content in srt_segments:
        if not content.startswith('Chapter '):
            continue
        chapter_start, _, _ = seg_time.split(',')
        if len(current_chapter) <= 1:
            # First chapter encountered: name the span before it 'begin'.
            filename, current_chapter = get_filename_title(prefix, "begin")
        # Close out the chapter that was running until this point.
        append_track(tracks, start, chapter_start, filename, current_chapter)
        # Start the new chapter.
        filename, current_chapter = get_filename_title(prefix, content)
        start = chapter_start
    if len(current_chapter) > 1:
        # Close the last chapter at the final segment's timing.
        _, seg_time, _ = srt_segments[-1]
        end_timing, _, _ = seg_time.split(',')
        append_track(tracks, start, end_timing, filename, current_chapter)
    return tracks
def main():
    """Create a track list file from a subtitle (SRT) file."""
    subtitle_path, track_list_path, prefix = parse_arg()
    segments = read_srt(subtitle_path)
    track_lines = transform_tracks(segments, prefix)
    write_track_list(track_lines, track_list_path)


if __name__ == '__main__':
    main()
|
# NOTE(review): operator and math appear unused in this script -- kept in
# case another part of the file relies on them; confirm before removing.
import sys, operator, math
# Make the parent directory importable so ZynqScope can be found.
sys.path.append('..')
import ZynqScope.ArmwaveRenderEngine as awre
# configure logger
import LoggingHandler, logging
log = logging.getLogger()
LoggingHandler.set_console_logger(log, logging.DEBUG)
# create armwave object
aobj = awre.ArmwaveRenderEngine()
def main():
    """Exercise the ArmwaveRenderEngine: configure a channel and target size,
    then run the PPM-file and in-memory render test cases."""
    print("aobj set_channel_colour")
    aobj.set_channel_colour(1, (25, 180, 250), 10)
    print("aobj set_target_dimensions")
    aobj.set_target_dimensions(2048, 768)
    print("")
    print("### First Test Case ###")
    print("")
    # Renders the test pattern to a PPM image on disk.
    print("aobj render_test_to_ppm")
    aobj.render_test_to_ppm("armwave_test.ppm")
    print("")
    print("### Second Test Case ###")
    print("")
    # Renders the test pattern without writing a file.
    print("aobj render_test")
    aobj.render_test()
    print("")
    print("### All Done ###")
    print("")
if __name__ == "__main__":
    main()
|
from ms_mint.tools import generate_grid_peaklist
def test__generate_peaklist():
    """Smoke test: a grid peaklist for a single mass should be produced."""
    result = generate_grid_peaklist([115], 0.1, intensity_threshold=10000)
    assert result is not None
# Copyright 2018 Open Source Foundries Limited.
#
# SPDX-License-Identifier: Apache-2.0
'''West's bootstrap/wrapper script.
'''
import argparse
import configparser
import os
import platform
import subprocess
import sys
import west._bootstrap.version as version
# Bail out immediately on Python 2 before any py3-only code runs.
if sys.version_info < (3,):
    sys.exit('fatal error: you are running Python 2')
#
# Special files and directories in the west installation.
#
# These are given variable names for clarity, but they can't be
# changed without propagating the changes into west itself.
#
# Top-level west directory, containing west itself and the manifest.
WEST_DIR = 'west'
# Subdirectory to check out the west source repository into.
WEST = 'west'
# Default west repository URL.
WEST_URL_DEFAULT = 'https://github.com/zephyrproject-rtos/west'
# Default revision to check out of the west repository.
WEST_REV_DEFAULT = 'master'
# File inside of WEST_DIR which marks it as the top level of the
# Zephyr project installation.
#
# (The WEST_DIR name is not distinct enough to use when searching for
# the top level; other directories named "west" may exist elsewhere,
# e.g. zephyr/doc/west.)
WEST_MARKER = '.west_topdir'
# Manifest repository directory under WEST_DIR.
MANIFEST = 'manifest'
# Default manifest repository URL.
MANIFEST_URL_DEFAULT = 'https://github.com/zephyrproject-rtos/manifest'
# Default revision to check out of the manifest repository.
MANIFEST_REV_DEFAULT = 'master'
#
# Helpers shared between init and wrapper mode
#
class WestError(RuntimeError):
    '''Base class for errors raised by the west bootstrap script.'''


class WestNotFound(WestError):
    '''Neither the current directory nor any parent has a West installation.'''
def west_dir(start=None):
    '''
    Return the path to the west/ directory, searching ``start`` and its
    parents.

    Raises WestNotFound if no west directory is found.
    '''
    topdir = west_topdir(start)
    return os.path.join(topdir, WEST_DIR)
def west_topdir(start=None):
    '''
    Return the directory containing the west/ directory -- the top level of
    the installation, where project repositories are stored -- searching
    from ``start`` (default: the current working directory) upwards.
    '''
    # If you change this function, make sure to update west.util.west_topdir().
    directory = start or os.getcwd()
    while True:
        # The marker file inside WEST_DIR identifies the top level.
        if os.path.isfile(os.path.join(directory, WEST_DIR, WEST_MARKER)):
            return directory
        parent = os.path.dirname(directory)
        if parent == directory:
            # Reached the filesystem root without finding the marker.
            raise WestNotFound('Could not find a West installation '
                               'in this or any parent directory')
        directory = parent
def clone(desc, url, rev, dest):
    """Clone revision *rev* of the git repository at *url* into *dest*.

    *desc* is only used in the progress message. Refuses to clone over an
    existing path and rejects URLs with an unrecognized scheme.
    """
    if os.path.exists(dest):
        raise WestError('refusing to clone into existing location ' + dest)
    known_schemes = ('http:', 'https:', 'git:', 'git+ssh:', 'file:', 'git@')
    if not url.startswith(known_schemes):
        raise WestError('Unknown URL scheme for repository: {}'.format(url))
    print('=== Cloning {} from {}, rev. {} ==='.format(desc, url, rev))
    subprocess.check_call(('git', 'clone', '-b', rev, '--', url, dest))
#
# west init
#
def init(argv):
    '''Command line handler for ``west init`` invocations.
    This exits the program with a nonzero exit code if fatal errors occur.'''
    # Remember to update scripts/west-completion.bash if you add or remove
    # flags
    init_parser = argparse.ArgumentParser(
        prog='west init',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=
        '''
Initializes a Zephyr installation. Use "west clone" afterwards to fetch the
sources.
In more detail, does the following:
  1. Clones the manifest repository to west/manifest, and the west repository
     to west/west
  2. Creates a marker file west/{}
  3. Creates an initial configuration file west/config
As an alternative to manually editing west/config, 'west init' can be rerun on
an already initialized West instance to update configuration settings. Only
explicitly passed configuration values (e.g. --mr MANIFEST_REVISION) are
updated.
Updating the manifest URL or revision via 'west init' automatically runs 'west
update --reset-manifest --reset-projects' afterwards to reset the manifest to
the new revision, and all projects to their new manifest revisions.
Updating the west URL or revision also runs 'west update --reset-west'.
To suppress the reset of the manifest, west, and projects, pass --no-reset.
With --no-reset, only the configuration file will be updated, and you will have
to handle any resetting yourself.
'''.format(WEST_MARKER))
    init_parser.add_argument(
        '-b', '--base-url',
        help='''Base URL for both 'manifest' and 'zephyr' repositories. Cannot
        be given if either -m or -w are.''')
    init_parser.add_argument(
        '-m', '--manifest-url',
        help='Manifest repository URL (default: {})'
             .format(MANIFEST_URL_DEFAULT))
    init_parser.add_argument(
        '--mr', '--manifest-rev', dest='manifest_rev',
        help='Manifest revision to fetch (default: {})'
             .format(MANIFEST_REV_DEFAULT))
    init_parser.add_argument(
        '-w', '--west-url',
        help='West repository URL (default: {})'
             .format(WEST_URL_DEFAULT))
    init_parser.add_argument(
        '--wr', '--west-rev', dest='west_rev',
        help='West revision to fetch (default: {})'
             .format(WEST_REV_DEFAULT))
    init_parser.add_argument(
        '--nr', '--no-reset', dest='reset', action='store_false',
        help='''Suppress the automatic reset of the manifest, west, and project
        repositories when re-running 'west init' in an existing
        installation to update the manifest or west URL/revision''')
    init_parser.add_argument(
        'directory', nargs='?', default=None,
        help='''Directory to initialize West in. Missing directories will be
        created automatically. (default: current directory)''')
    args = init_parser.parse_args(args=argv)
    # If a West installation already exists here, only update its
    # configuration; otherwise bootstrap a fresh one.
    try:
        reinit(os.path.join(west_dir(args.directory), 'config'), args)
    except WestNotFound:
        bootstrap(args)
def bootstrap(args):
    '''Bootstrap a new manifest + West installation.'''
    # -b is a shorthand for deriving both repository URLs from one base.
    if args.base_url:
        if args.west_url or args.manifest_url:
            sys.exit('fatal error: -b is incompatible with -m and -w')
        west_url = args.base_url.rstrip('/') + '/west'
        manifest_url = args.base_url.rstrip('/') + '/manifest'
    else:
        west_url = args.west_url or WEST_URL_DEFAULT
        manifest_url = args.manifest_url or MANIFEST_URL_DEFAULT
    west_rev = args.west_rev or WEST_REV_DEFAULT
    manifest_rev = args.manifest_rev or MANIFEST_REV_DEFAULT
    directory = args.directory or os.getcwd()
    if not os.path.isdir(directory):
        try:
            print('Initializing in new directory', directory)
            os.makedirs(directory, exist_ok=False)
        except PermissionError:
            sys.exit('Cannot initialize in {}: permission denied'.format(
                directory))
        except FileExistsError:
            sys.exit('Something else created {} concurrently; quitting'.format(
                directory))
        except Exception as e:
            sys.exit("Can't create directory {}: {}".format(
                directory, e.args))
    else:
        print('Initializing in', directory)
    # Clone the west source code and the manifest into west/. Git will create
    # the west/ directory if it does not exist.
    clone('west repository', west_url, west_rev,
          os.path.join(directory, WEST_DIR, WEST))
    clone('manifest repository', manifest_url, manifest_rev,
          os.path.join(directory, WEST_DIR, MANIFEST))
    # Create an initial configuration file
    config_path = os.path.join(directory, WEST_DIR, 'config')
    update_conf(config_path, west_url, west_rev, manifest_url, manifest_rev)
    print('=== Initial configuration written to {} ==='.format(config_path))
    # Create a dotfile to mark the installation. Hide it on Windows.
    with open(os.path.join(directory, WEST_DIR, WEST_MARKER), 'w') as f:
        hide_file(f.name)
    print('=== West initialized. Now run "west clone" in {}. ==='.
          format(directory))
def reinit(config_path, args):
    '''
    Reinitialize an existing installation.
    This updates the west/config configuration file, and optionally resets the
    manifest, west, and project repositories to the new revision.
    '''
    # -b derives both URLs from a single base, and excludes -m/-w.
    if args.base_url:
        if args.west_url or args.manifest_url:
            sys.exit('fatal error: -b is incompatible with -m and -w')
        west_url = args.base_url.rstrip('/') + '/west'
        manifest_url = args.base_url.rstrip('/') + '/manifest'
    else:
        west_url = args.west_url
        manifest_url = args.manifest_url
    # Nothing to do unless at least one setting was explicitly passed.
    if not (west_url or args.west_rev or manifest_url or args.manifest_rev):
        sys.exit('West already initialized. Please pass any settings you '
                 'want to change.')
    update_conf(config_path, west_url, args.west_rev, manifest_url,
                args.manifest_rev)
    print('=== Updated configuration written to {} ==='.format(config_path))
    # Unless --no-reset was given, run 'west update' with flags matching
    # whichever settings changed, so repositories track the new config.
    if args.reset:
        cmd = ['update']
        if manifest_url or args.manifest_rev:
            cmd += ['--reset-manifest', '--reset-projects']
        if west_url or args.west_rev:
            cmd.append('--reset-west')
        print("=== Running 'west {}' to update repositories ==="
              .format(' '.join(cmd)))
        wrap(cmd)
def update_conf(config_path, west_url, west_rev, manifest_url, manifest_rev):
    '''Create or update the configuration file at *config_path*.

    Only non-empty values are written; None/empty arguments leave the
    corresponding keys untouched.
    '''
    config = configparser.ConfigParser()
    # Reading a missing file is a no-op, so no existence check is needed.
    config.read(config_path)
    for section, key, value in (('west', 'remote', west_url),
                                ('west', 'revision', west_rev),
                                ('manifest', 'remote', manifest_url),
                                ('manifest', 'revision', manifest_rev)):
        update_key(config, section, key, value)
    with open(config_path, 'w') as f:
        config.write(f)
def update_key(config, section, key, value):
    '''Set config[section][key] = value, creating *section* if needed.

    A None/empty *value* is ignored, leaving any existing key as-is.
    '''
    if value:
        if section not in config:
            config[section] = {}
        config[section][key] = value
def hide_file(path):
    '''Ensure path is a hidden file.
    On Windows, this uses attrib to hide the file manually.
    On UNIX systems, this just checks that the path's basename begins
    with a period ('.'), for it to be hidden already. It's a fatal
    error if it does not begin with a period in this case.
    On other systems, this just prints a warning.
    '''
    system = platform.system()
    if system == 'Windows':
        subprocess.check_call(['attrib', '+H', path])
    elif os.name == 'posix': # Try to check for all Unix, not just macOS/Linux
        if not os.path.basename(path).startswith('.'):
            sys.exit("internal error: {} can't be hidden on UNIX".format(path))
    else:
        # Neither Windows nor POSIX: nothing reliable we can do here.
        print("warning: unknown platform {}; {} may not be hidden"
              .format(system, path), file=sys.stderr)
#
# Wrap a West command
#
def append_to_pythonpath(directory):
    '''Append *directory* to the PYTHONPATH environment variable.

    Bug fix: join entries with os.pathsep (':' on POSIX, ';' on Windows)
    instead of a hard-coded ':', which produced an invalid path list on
    Windows.
    '''
    pp = os.environ.get('PYTHONPATH')
    os.environ['PYTHONPATH'] = os.pathsep.join(([pp] if pp else []) + [directory])
def wrap(argv):
    '''Delegate *argv* to the "real" west inside the current installation.

    Handles -V/--version and a bare -h/--help specially so they also work
    outside an installation directory; otherwise re-runs west/src/west/main.py
    in a subprocess with PYTHONPATH extended.
    '''
    printing_version = False
    printing_help_only = False
    if argv:
        if argv[0] in ('-V', '--version'):
            print('West bootstrapper version: v{} ({})'.
                  format(version.__version__, os.path.dirname(__file__)))
            printing_version = True
        elif len(argv) == 1 and argv[0] in ('-h', '--help'):
            # This only matters if we're called outside of an
            # installation directory. We delegate to the main help if
            # called from within one, because it includes a list of
            # available commands, etc.
            printing_help_only = True
    start = os.getcwd()
    try:
        topdir = west_topdir(start)
    except WestNotFound:
        if printing_version:
            sys.exit(0)  # run outside of an installation directory
        elif printing_help_only:
            # We call print multiple times here and below instead of using
            # \n to be newline agnostic.
            print('To set up a Zephyr installation here, run "west init".')
            print('Run "west init -h" for additional information.')
            sys.exit(0)
        else:
            print('Error: "{}" is not a Zephyr installation directory.'.
                  format(start), file=sys.stderr)
            print('Things to try:', file=sys.stderr)
            print('  - Run "west init" to set up an installation here.',
                  file=sys.stderr)
            print('  - Run "west init -h" for additional information.',
                  file=sys.stderr)
            sys.exit(1)
    west_git_repo = os.path.join(topdir, WEST_DIR, WEST)
    if printing_version:
        try:
            git_describe = subprocess.check_output(
                ['git', 'describe', '--tags'],
                stderr=subprocess.DEVNULL,
                cwd=west_git_repo).decode(sys.getdefaultencoding()).strip()
            print('West repository version: {} ({})'.format(git_describe,
                                                            west_git_repo))
        except subprocess.CalledProcessError:
            # BUG FIX: user-facing message said "verison".
            print('West repository version: unknown; no tags were found')
        sys.exit(0)
    # Replace the wrapper process with the "real" west
    # sys.argv[1:] strips the argv[0] of the wrapper script itself
    argv = ([sys.executable,
             os.path.join(west_git_repo, 'src', 'west', 'main.py')] +
            argv)
    append_to_pythonpath(os.path.join(west_git_repo, 'src'))
    try:
        subprocess.check_call(argv)
    except subprocess.CalledProcessError:
        # Child failed; exit nonzero without a Python traceback.
        sys.exit(1)
#
# Main entry point
#
def main(wrap_argv=None):
    '''Entry point to the wrapper script.

    Dispatches "init ..." to init() and every other invocation to wrap().
    '''
    argv = sys.argv[1:] if wrap_argv is None else wrap_argv
    if argv and argv[0] == 'init':
        init(argv[1:])
    else:
        wrap(argv)
    sys.exit(0)
if __name__ == '__main__':
    main()
|
import pandas as pd
import numpy as np
from ldetect2.src.m2v import mat2vec
from sys import argv
# Script: read per-chunk covariance parquet files, concatenate them, derive
# adjusted partition boundaries, and run mat2vec over the combined data.
partitions = argv[1]
theta2 = argv[2]
covariances = argv[3:]
# The theta2 file holds a single float on its first line.
# NOTE(review): the file handle from open() is never closed.
theta2 = float(open(theta2).readline().strip())
import sys
# print(partitions, file=sys.stderr)
# print(covariances, file=sys.stderr)
# partitions = snakemake.input["partitions"]
# covariances = snakemake.input["covariances"]
dfs = []
from time import time
length = 0
memory_use = 0
for i, c in enumerate(covariances):
    start = time()
    df = pd.read_parquet(c)
    length += len(df)
    memory_use += df.memory_usage(deep=True).sum()
    dfs.append(df)
    end = time()
    # Progress: index, file, fraction done, seconds, cumulative rows, GB used.
    print(i, c, i/len(covariances), end - start, length, memory_use / 1e9, file=sys.stderr)
# NOTE(review): this sort happens *after* the files were already read and
# appended, so it does not affect the concatenation order below — confirm
# whether it should precede the loop.
covariances = sorted(covariances, key=lambda k: k.split("/"))
df = pd.concat(dfs)
print("Done concatenating!")
print("df.memory_usage(deep=True).sum()", df.memory_usage(deep=True).sum())
# s = pd.read_csv(covariances[-1], sep=" ", usecols=[2], names=["i"],
# dtype={"i": np.int32}, squeeze=True)
# max_ = s.max()
ps = pd.read_table(partitions, sep=" ", header=None)
# New partition ends: midpoint between each partition start and the next
# partition's end; the final partition is extended to the max index in df.
new_ends = ((ps[0] + ps[1].shift(-1).values)/2)
new_ends = new_ends.fillna(df.i.max()).astype(np.int32)
ps.insert(ps.shape[1], 2, new_ends)
# assert len(ps) == len(covariances), "Number of partitions and covariance files are not the same!"
mat2vec(df.i.values, df.j.values, df.val.values, ps, theta2)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.conf.urls import url
from . import views
# URL routes for the petition app. Slug-based pattern strings are wrapped in
# ugettext_lazy (_) so the URL paths themselves are translatable per language.
urlpatterns = [
    url(r'^$', views.HomeView.as_view(),
        name="home"),
    url(_(r'^signatures-(?P<slug>[\w-]+)$'), views.SignatureListView.as_view(),
        name="list"),
    url(_(r'^text-(?P<slug>[\w-]+)$'), views.PetitionDetailView.as_view(),
        name="text"),
    url(_(r'^form-(?P<slug>[\w-]+)$'), views.FormView.as_view(),
        name="form"),
    url(_(r'^form-(?P<slug>[\w-]+)/thanks$'), views.ThanksView.as_view(),
        name="thanks"),
    # Catch-all slug route; must stay last so the specific routes above match first.
    url(r'^(?P<category_slug>[\w-]+)$', views.CategoryView.as_view(),
        name="category"),
]
|
import base64
import multiprocessing
import sys
from concurrent import futures
import cv2
import grpc
import numpy as np
import work_object_detection_pb2
import work_object_detection_pb2_grpc
from work_object_detection_model import const
from work_object_detection_model.model import Model
# SERVER_PORT = 50051
SERVER_PORT = 50052
# WEIGHTS_PATH = f'./weights/2020-05-18T12:28:25.901703'
WEIGHTS_PATH = f'/home/latona/hades/BackendService/work-object-detection-server/weights/2020-05-24T19:30:23.098970-epoch=05-val_loss=0.02/variables/variables'
# Expected full-frame size, and the tiling used by SplitAndPredict:
# 2 rows x 3 columns of 600x640 tiles.
IMAGE_HEIGHT = 1200
IMAGE_WIDTH = 1920
N_SPLIT_HEIGHT = 2
N_SPLIT_WIDTH = 3
SPLIT_HEIGHT = 600
SPLIT_WIDTH = 640
# Import-time side effect: build the model once and load trained weights.
model = Model()
model.compile()
model.load_weights(WEIGHTS_PATH)
def to_ndarray(image):
    """Decode a base64-encoded image payload into a BGR ndarray."""
    raw = base64.b64decode(image)
    buffer = np.frombuffer(raw, dtype=np.uint8)
    return cv2.imdecode(buffer, cv2.IMREAD_COLOR)
def predict(image):
    """Run the model on a single full frame.

    Args:
        image: BGR image array (as returned by to_ndarray).

    Returns:
        (accuracy, is_work): the model score for the first output and whether
        it exceeds the 0.5 decision threshold.
    """
    # Resize to the model input size, add a batch axis, scale to [0, 1].
    image = cv2.resize(image, (const.IMAGE_WIDTH, const.IMAGE_HEIGHT))
    images = image[np.newaxis, :]
    images = images / 255
    results = model.predict(images)
    accuracy = float(results[0, 0])
    # Idiom fix: the comparison already yields the boolean; the original
    # `True if accuracy > 0.5 else False` was redundant.
    is_work = accuracy > 0.5
    return accuracy, is_work
def split(image):
    """Cut the full frame into N_SPLIT_HEIGHT x N_SPLIT_WIDTH tiles.

    Tiles are returned column by column (all rows of column 0 first),
    matching the layout debug_show() reassembles.
    """
    tiles = []
    for col in range(N_SPLIT_WIDTH):
        x0 = SPLIT_WIDTH * col
        for row in range(N_SPLIT_HEIGHT):
            y0 = SPLIT_HEIGHT * row
            tiles.append(image[y0:y0 + SPLIT_HEIGHT, x0:x0 + SPLIT_WIDTH])
    return tiles
def predict_multiple(images):
    """Classify a batch of image tiles.

    Returns:
        (accuracy, is_work, all_accuracy, all_is_work): mean score, whether any
        tile is positive, and the per-tile scores/flags as plain lists.

    NOTE(review): unlike predict(), this converts BGR->RGB and scales to [0, 1]
    *before* resizing — confirm which preprocessing the model was trained with.
    """
    new_images = []
    for image in images:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = image / 255
        image = cv2.resize(image, (const.IMAGE_WIDTH, const.IMAGE_HEIGHT))
        new_images.append(image)
    new_images = np.array(new_images)
    results = model.predict(new_images)
    all_accuracy = results[:, 0]
    all_is_work = np.where(all_accuracy > 0.5, True, False)
    accuracy = float(np.mean(all_accuracy))
    is_work = bool(np.any(all_is_work))
    return accuracy, is_work, all_accuracy.tolist(), all_is_work.tolist()
def put_text(image, accuracy, is_work):
    """Overlay the accuracy and is_work values in the image's top-right corner.

    Draws in place; the two lines are stacked vertically, right-aligned.
    """
    height, width, _ = image.shape
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 1.0
    font_color = (255, 255, 255)
    thickness = 2
    baseline = 0
    for value in (accuracy, is_work):
        text = f'{value}'
        text_width, text_height = cv2.getTextSize(
            text, font, font_scale, thickness)[0]
        baseline += text_height
        cv2.putText(
            image, text,
            (width - text_width, baseline),
            font, font_scale, font_color, thickness)
    return
def debug_show(queue):
    """Debug-display loop run in a separate process.

    Pulls (debug_name, images, all_accuracy, all_is_work) tuples from *queue*,
    annotates each image, and shows either the single frame or the reassembled
    2x3 tile mosaic in an OpenCV window.
    """
    cv2.namedWindow('work-object-detection-server')
    while True:
        debug_name, images, all_accuracy, all_is_work = queue.get()
        for image, accuracy, is_work in zip(images, all_accuracy, all_is_work):
            put_text(image, accuracy, is_work)
        if debug_name == 'single':
            image = images[0]
        elif debug_name == 'multiple':
            # Tiles arrive column-major (see split()): stack each column
            # vertically, then the columns side by side.
            image1 = np.concatenate((images[0], images[1]), axis=0)
            image2 = np.concatenate((images[2], images[3]), axis=0)
            image3 = np.concatenate((images[4], images[5]), axis=0)
            image = np.concatenate((image1, image2, image3), axis=1)
        else:
            print(f'Ignore debug request which debug_name is {debug_name}.')
            # BUG FIX: previously fell through to imshow with `image` either
            # undefined (first request) or stale (previous request).
            continue
        cv2.imshow('work-object-detection-server', image)
        cv2.waitKey(1)
class WorkObjectDetectionServicer(work_object_detection_pb2_grpc.WorkObjectDetectionServicer):
    """gRPC servicer exposing work-object detection on full and split frames.

    When the interpreter runs in debug mode (sys.flags.debug), a separate
    process displays annotated frames via debug_show().
    """
    def __init__(self):
        super().__init__()
        if sys.flags.debug:
            print('*' * 50)
            print('Debug start')
            print('*' * 50)
            # 'spawn' keeps the display child free of inherited state that is
            # not fork-safe (loaded model, GUI handles).
            ctx = multiprocessing.get_context('spawn')
            self.queue = ctx.Queue()
            self.process = ctx.Process(target=debug_show, args=(self.queue,))
            self.process.start()
    def __del__(self):
        # Stop the debug display process when the servicer goes away.
        if sys.flags.debug:
            self.process.terminate()
    def Predict(self, request, context):
        """Classify one full frame; returns a Detection message."""
        print(f'request.date = {request.date}')
        image = to_ndarray(request.image)
        accuracy, is_work = predict(image)
        if sys.flags.debug:
            self.queue.put(('single', [image], [accuracy], [is_work]))
        return work_object_detection_pb2.Detection(accuracy=accuracy, is_work=is_work)
    def SplitAndPredict(self, request, context):
        """Split the frame into tiles, classify each, and aggregate results."""
        print(f'request.date = {request.date}.')
        image = to_ndarray(request.image)
        height, width, channel = image.shape
        if not (height == IMAGE_HEIGHT and width == IMAGE_WIDTH):
            # BUG FIX: the original f-string interpolated the *set literal*
            # {IMAGE_HEIGHT, IMAGE_WIDTH, 3} and had unbalanced parentheses,
            # producing a garbled error message.
            error = f'Image shape must be ({IMAGE_HEIGHT}, {IMAGE_WIDTH}, 3), but requested image shape is ({height}, {width}, {channel})'
            return work_object_detection_pb2.Detections(
                status=False, error=error,
                accuracy=None, is_work=None,
                all_accuracy=None, all_is_work=None)
        images = split(image)
        accuracy, is_work, all_accuracy, all_is_work = predict_multiple(images)
        if sys.flags.debug:
            self.queue.put(('multiple', images, all_accuracy, all_is_work))
        return work_object_detection_pb2.Detections(
            status=True, error=None,
            accuracy=accuracy, is_work=is_work,
            all_accuracy=all_accuracy, all_is_work=all_is_work)
def run_server():
    """Start the gRPC server on SERVER_PORT and block until termination."""
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))
    work_object_detection_pb2_grpc.add_WorkObjectDetectionServicer_to_server(
        WorkObjectDetectionServicer(), server
    )
    # Insecure (plaintext) transport on all interfaces.
    server.add_insecure_port(f'[::]:{SERVER_PORT}')
    server.start()
    print(f'Listening [::]:{SERVER_PORT}')
    server.wait_for_termination()
    return
if __name__ == "__main__":
    run_server()
|
import sys
import torch
import torch.nn.functional as F
#from warp_rnnt import rnnt_loss as loss1
#from warprnnt_pytorch import rnnt_loss as loss2
from transducer.functions.transducer import Transducer
from timeit import default_timer as timer
def run_loss1(xs, ys, xn, yn):
    # NOTE(review): `loss1` (warp_rnnt.rnnt_loss) is commented out at the top
    # of the file, so calling this raises NameError. Unused by the loop below.
    xs = F.log_softmax(xs, -1)
    return loss1(xs, ys, xn, yn)
def run_loss2(xs, ys, xn, yn):
    # NOTE(review): `loss2` (warprnnt_pytorch.rnnt_loss) is commented out at
    # the top of the file, so calling this raises NameError. Unused below.
    return loss2(xs, ys, xn, yn, reduction='none')
def run_loss3(xs, ys, xn, yn):
    # Project-local Transducer loss; expects log-probabilities and a
    # flattened target tensor.
    xs = F.log_softmax(xs, -1)
    fn = Transducer(blank_label=0)
    return fn(xs, ys.view(-1), xn, yn)
def run_benchmark(loss, E, N, T, U, V):
    """Time *loss* over E runs on random inputs; print mean milliseconds.

    Args:
        loss: callable (xs, ys, xn, yn) -> costs.
        E: repetitions; N: batch size; T: input length; U: target length + 1;
        V: vocabulary size.
    """
    torch.manual_seed(N)
    elapsed_time = 0
    for i in range(E):
        xs = torch.randn((N, T, U, V), dtype=torch.float32, requires_grad=True)
        ys = torch.randint(1, V, (N, U-1), dtype=torch.int)
        #xn = torch.randint(T // 2, T+1, (N,), dtype=torch.int)
        #yn = torch.randint(U // 2, U, (N,), dtype=torch.int)
        #xn = xn + T - xn.max()
        #yn = yn + U-1 - yn.max()
        # Full-length sequences (the randomized-length variant is commented out).
        xn = torch.ones((N,), dtype=torch.int) * T
        yn = torch.ones((N,), dtype=torch.int) * (U-1)
        #xs = xs.cuda()
        #ys = ys.cuda()
        #xn = xn.cuda()
        #yn = yn.cuda()
        # Only the loss call itself is timed; tensor construction is excluded.
        t = timer()
        costs = loss(xs, ys, xn, yn)
        elapsed_time += timer() - t
        del xs, ys, xn, yn, costs
        torch.cuda.empty_cache()  # no-op when CUDA is unavailable
    elapsed_time = elapsed_time * 1000 / E
    print("%d: %.2f" % (N, elapsed_time))
def run_benchmark_safe(loss, E, N, T, U, V):
    """Run run_benchmark, printing RuntimeErrors (e.g. OOM) instead of raising."""
    try:
        run_benchmark(loss, E, N, T, U, V)
    except RuntimeError as err:
        print(err)
# Benchmark only the project-local Transducer loss across growing batch sizes.
for n in [1, 16, 32, 64, 128]:
    for loss in [run_loss3]:
        #run_benchmark(loss, E=100, N=n, T=150, U=40, V=28)
        #run_benchmark_safe(loss, E=10, N=n, T=150, U=20, V=5000)
        run_benchmark_safe(loss, E=10, N=n, T=1500, U=300, V=50)
|
""" Sthis adds 'created_at', 'updated_at' and 'delete_at' fields like a rail apps in django,
also added soft delete method.
Copyright (c) 2018, Carlos Ganoza Plasencia
url: http://carlosganoza.com
"""
from django.contrib import admin
class ParanoidAdmin(admin.ModelAdmin):
readonly_fields = ('created_at','deleted_at','updated_at')
|
import subprocess
import sys
import os
import os.path
def cd(path):
    """Change the process working directory to *path*."""
    os.chdir(path)
def cdToScript():
    # Change the working directory to the directory containing this script,
    # so the relative paths used below resolve regardless of invocation cwd.
    cd(os.path.dirname(os.path.abspath(__file__)))
def conv(path):
    """Re-encode a text file in place to UTF-8 with BOM (utf-8-sig).

    Files whose first byte is already 0xEF (the BOM's first byte, as in the
    original heuristic) are skipped. The content is decoded by trying cp932
    first, then UTF-8.

    Raises:
        Exception('invalid encode') when neither codec can decode the file.

    Improvements over the original: file handles are closed via context
    managers, and only UnicodeDecodeError is swallowed during the decode
    attempts instead of a bare except hiding every failure.
    """
    print('Convert {}'.format(path))
    with open(path, mode='rb') as probe:
        byte = probe.read(1)
    if b'\xef' == byte:
        return
    data = None
    for encoding in ('cp932', 'utf-8'):
        try:
            with open(path, mode='r', encoding=encoding) as fin:
                data = fin.read()
            break
        except UnicodeDecodeError:
            continue
    if data is None:
        raise Exception('invalid encode')
    with open(path, mode='w', encoding='utf-8-sig') as fout:
        fout.write(data)
def get_files(path):
    """Recursively collect every file path under *path*.

    Traversal is depth-first in os.listdir order, matching the original
    accumulator-based implementation.
    """
    collected = []
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if os.path.isdir(full):
            collected.extend(get_files(full))
        else:
            collected.append(full)
    return collected
def conv_dir(d):
    """Convert every .cpp/.h file under directory *d* to UTF-8 with BOM."""
    for path in get_files(d):
        _, ext = os.path.splitext(path)
        if ext in ['.cpp', '.h']:
            conv(path)
def exec_sync( cmd ):
    """ exec command line.

    Echoes *cmd*, runs it through the shell, and waits for completion.
    NOTE(review): shell=True is acceptable for the hard-coded build commands
    below but unsafe for untrusted input; the exit status is collected but
    neither returned nor checked.
    """
    print( cmd )
    p = subprocess.Popen(cmd, shell=True)
    ret = p.wait()
    print('')
# Script body: normalize source encodings, then run SWIG to regenerate the
# C# (default) or Java bindings depending on the first command-line argument.
cdToScript()
conv_dir(r'asd_cpp/')
conv_dir(r'unitTest_cpp_gtest/')
conv_dir(r'unitTest_Engine_cpp_gtest/')
argv = sys.argv
if len(argv) == 1 or argv[1] == 'csharp':
    if not os.path.isdir('asd_cs/swig'):
        os.makedirs('asd_cs/swig')
    exec_sync( 'swig -c++ -csharp -namespace asd.swig -dllimport Altseed_core -o asd_cpp/core/dll.cxx -outdir asd_cs/swig/ swig.i' )
    # Patch the generated P/Invoke file so the DLL name is resolved through
    # the platform-specific define instead of the hard-coded literal.
    f = open(r'asd_cs/swig/asd_corePINVOKE.cs', 'r', encoding='utf-8')
    lines = f.read()
    f.close()
    lines = lines.replace(r'"Altseed_core"', r'asd.Particular.Define.DLL')
    f = open(r'asd_cs/swig/asd_corePINVOKE.cs', 'w')
    f.write(lines)
    f.close()
elif argv[1] == 'java':
    if not os.path.isdir('asd_java'):
        os.makedirs('asd_java')
    if not os.path.isdir('asd_java/swig'):
        os.makedirs('asd_java/swig')
    exec_sync( 'swig -c++ -java -package asd.swig -o asd_cpp/core/dll.cxx -outdir asd_java/swig/ swig.i' )
#!/usr/local/bin/python3
import serial
#SERIAL_DEVICE = "/dev/tty.SLAB_USBtoUART"
SERIAL_DEVICE = "/dev/tty.usbserial-1420"
# Command opcodes for the motor controller's serial protocol.
DISABLE_MOSFETS_COMMAND = 0
ENABLE_MOSFETS_COMMAND = 1
SET_POSITION_AND_MOVE_COMMAND = 2
SET_VELOCITY_COMMAND = 3
SET_POSITION_AND_FINISH_TIME_COMMAND = 4
SET_ACCELERATION_COMMAND = 5
START_CALIBRATION_COMMAND = 6
CAPTURE_HALL_SENSOR_DATA_COMMAND = 7
RESET_TIME_COMMAND = 8
GET_CURRENT_TIME_COMMAND = 9
TIME_SYNC_COMMAND = 10
def read_and_print(ser):
    """Read up to 1000 bytes from *ser* and print a summary plus each byte.

    Each byte is shown once in hex and once in decimal.
    """
    data = ser.read(1000)
    print("Received %d bytes" % (len(data)))
    print(data)
    for byte_value in data:
        print("0x%02X %d" % (byte_value, byte_value))
# Hardware side effects: open the serial port, reset the controller clock,
# read the time back, then close the port. 'E' addresses the target device.
ser = serial.Serial(SERIAL_DEVICE, 230400, timeout = 0.5)  # open serial port
print(ser.name)         # check which port was really used
ser.write(bytearray([ord('E'), RESET_TIME_COMMAND, 0]))
read_and_print(ser)
ser.write(bytearray([ord('E'), GET_CURRENT_TIME_COMMAND, 0]))
read_and_print(ser)
ser.close()
|
import setuptools
from numpy.distutils.core import setup, Extension
import os
import sys
import io
import re
def read(*names, **kwargs):
    """Return the text of the file at *names*, relative to this file's directory.

    Keyword args:
        encoding: text encoding to use (default "utf8").
    """
    path = os.path.join(os.path.dirname(__file__), *names)
    encoding = kwargs.get("encoding", "utf8")
    with io.open(path, encoding=encoding) as fp:
        return fp.read()
def find_version(*file_paths):
    """Extract the __version__ string from the file at *file_paths*.

    Raises RuntimeError when no `__version__ = '...'` assignment is found.
    """
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      contents, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Self-publishing shortcut: `python setup.py publish` rebuilds the dist
# artifacts, uploads via twine, and exits.
if sys.argv[-1] == "publish":
    os.system("rm dist/*")
    os.system("python setup.py sdist")
    os.system("python setup.py bdist_wheel")
    os.system("twine upload dist/*")
    sys.exit()
# f2py-wrapped Fortran extension with the core halo-model routines.
fort = Extension('halomod.fort.routines', ['halomod/fort/routines.f90'],
                 extra_f90_compile_args=['-O3', '-Wall', '-Wtabs'],
                 f2py_options=['--quiet', 'only:', 'power_gal_2h',
                               'power_gal_1h_ss', 'corr_gal_1h_ss',
                               'corr_gal_1h_cs', 'power_to_corr',
                               'corr_gal_1h', 'get_subfind_centres', ':'])
# OpenMP-enabled two-halo correlation extension (links against libgomp).
corr_2h = Extension('halomod.fort.twohalo', ['halomod/fort/twohalo.f90'],
                    extra_f90_compile_args=['-Wall', '-Wtabs', '-fopenmp'],
                    f2py_options=['only:', "power_to_corr", "twohalo", "dblsimps", ":"],
                    libraries=['gomp']
                    )
if __name__ == "__main__":
setup(
name="halomod",
version=find_version("halomod","__init__.py"),
install_requires=['hmf>=2.0.0',
'mpmath',
'cached_property',
'numpy',
'scipy'],
scripts=['scripts/pophod',
'scripts/halomod-fit'],
author="Steven Murray",
author_email="steven.murray@curtin.edu.au",
description="A Halo Model calculator built on hmf",
long_description=read('README.rst'),
license='MIT',
keywords="halo occupation distribution",
url="https://github.com/steven-murray/halomod",
ext_modules=[fort, corr_2h] if os.getenv("WITH_FORTRAN",None) else [],
packages=['halomod', 'halomod.fort'] if os.getenv("WITH_FORTRAN",None) else ['halomod'],
package_data={"halomod":['data/*']}
)
|
#!/bin/python
import numpy
import scipy.ndimage
from pyami import mrc
if __name__ == "__main__":
## using scipy.ndimage to find blobs
labelstruct = numpy.ones((3,3,3))
def scipyblobs(im,mask):
labels,n = scipy.ndimage.label(mask, labelstruct)
## too bad ndimage module is inconsistent with what is returned from
## the following functions. Sometiems a list, sometimes a single value...
if n==0:
centers = []
sizes = []
stds = []
means = []
else:
centers = scipy.ndimage.center_of_mass(im,labels,range(1,n+1))
sizes = scipy.ndimage.histogram(labels,1,n+1,n)
stds = scipy.ndimage.standard_deviation(im,labels,range(1,n+1))
means = scipy.ndimage.mean(im,labels,range(1,n+1))
if n==1:
centers = [centers]
stds = [stds]
means = [means]
else:
centers = map(numpy.array, centers)
blobs = []
for i in range(n):
blobs.append({'center':centers[i], 'n':sizes[i], 'mean':means[i],'stddev':stds[i]})
return blobs
	# input parameters
	scale_factor = 60.0 #scale the map so that the we can pretend the atom sphere as a gold bead
	threshold = -6172+float((14509+6172)*(170))/256 #map intensity threshold to generate blobs
	minsize = 5 #minimal blob size to be considered as a gold cluster
	# end of input variables
	input1 = raw_input('Enter the .mrc for pdb conversion: ')
	image = mrc.read(input1)
	output1 = raw_input('Enter the destination name: ')
	out = open(output1,'w')
	line = "HEADER " + output1 + "\n"
	out.write(line)
	shape = image.shape
	print shape
	scale = float(scale_factor)/min(shape)
	lattice = [shape[0]*scale,shape[1]*scale,shape[2]*scale]
	lattice.extend([90.0,90.0,90.0])
	# lattice parameter and transformation output in pdb format
	line = "CRYST1%9.3f%9.3f%9.3f%7.2f%7.2f%7.2f P 1 1\n" %tuple(lattice)
	line += "ORIGX1%10.6f%10.6f%10.6f%10.5f\n" %(1.0,0.0,0.0,0.0)
	line += "ORIGX2%10.6f%10.6f%10.6f%10.5f\n" %(0.0,1.0,0.0,0.0)
	line += "ORIGX3%10.6f%10.6f%10.6f%10.5f\n" %(0.0,0.0,1.0,0.0)
	line += "SCALE1%10.6f%10.6f%10.6f%10.5f\n" %(1.0/shape[0],0.0,0.0,0.0)
	line += "SCALE2%10.6f%10.6f%10.6f%10.5f\n" %(0.0,1.0/shape[1],0.0,0.0)
	line += "SCALE3%10.6f%10.6f%10.6f%10.5f\n" %(0.0,0.0,1.0/shape[2],0.0)
	out.write(line)
	# Binarize the map at the threshold and find connected blobs.
	maskimg = numpy.where(image>=threshold,1,0)
	blobs = scipyblobs(image,maskimg)
	print "total blobs of any size=",len(blobs)
	i = 0
	# NOTE(review): a single qualifying blob (len(blobs) == 1) falls into the
	# "too few blobs" branch; confirm whether `> 1` should be `>= 1`.
	if len(blobs) > 1:
		for blob in blobs:
			if blob['n'] > minsize:
				i = i+1
				center = blob['center']*scale
				# this line output each blob center as a Chloride ion in pdb format
				line = "HETATM%5d CL CL %4d%12.3f%8.3f%8.3f 1.00 0.00 AU\n" %(i,i,center[0],center[1],center[2])
				out.write(line)
		out.write('END\n')
		out.close()
		print i
	else:
		print blobs
		print "too few blobs"
|
# Generate two_1.cfg .. two_20.cfg from ../one/one_1.cfg, rewriting the
# doom_scenario_path line to point at the matching two_N.wad file.
with open('../one/one_1.cfg', mode='r') as old_file:
    lines = old_file.readlines()
for index in range(1, 21):
    # Context managers replace the original unclosed file handles.
    with open('two_{}.cfg'.format(index), mode='w') as new_file:
        for line in lines:
            if 'doom_scenario_path' in line:
                # BUG FIX: the replacement line previously lacked the trailing
                # newline, fusing it with the following config line.
                line = "doom_scenario_path = two_{}.wad\n".format(index)
            new_file.write(line)
from . import api
from flask import jsonify
from ..models import Comment
@api.route('/comments/')
def get_comments():
    """Return all comments as JSON: {"comments": [...]}."""
    all_comments = Comment.query.all()
    payload = {'comments': [comment.to_json() for comment in all_comments]}
    return jsonify(payload)
@api.route('/comments/<int:id>')
def get_comment(id):
    """Return a single comment by id as JSON; 404 when it does not exist."""
    found = Comment.query.get_or_404(id)
    return jsonify(found.to_json())
|
# Cleaning tool for historical COVID-19 case data of Chinese provinces/cities.
# Author: https://github.com/Avens666  mail: cz_666@qq.com
# Source data: https://github.com/BlankerL/DXY-COVID-19-Data/blob/master/csv/DXYArea.csv
# The script de-duplicates each province/city's daily records, keeping only the
# latest record per day (optionally the day's maximum values instead).
# Province-level "suspectedCount" has little analytic value and is not exported.
# Set input_file and output_file to choose the source and destination files.
import pandas
from datetime import timedelta
input_file = "data2.18.csv"  # "t15.csv"
output_file = "out1.csv"
# pandas display settings, to ease debugging
# show all columns
pandas.set_option('display.max_columns', None)
# show all rows
pandas.set_option('display.max_rows', None)
# widen value display to 200 characters (default 50)
pandas.set_option('max_colwidth', 200)
# !!! choose the character set matching the input file
try:
    dataf = pandas.read_csv(input_file, encoding='UTF-8')
except:
    dataf = pandas.read_csv(input_file, encoding='gb2312')
# Derive a date-only column from the update timestamp.
dataf['updateTime'] = pandas.to_datetime(dataf['updateTime'])
dataf['date'] = dataf['updateTime'].apply(lambda x: x.strftime('%Y-%m-%d'))
dataf['date'] = pandas.to_datetime(dataf['date'])
# print(type(dataf)) print(dataf.dtypes) print(dataf.head())
# extract the list of provinces
df_t = dataf['provinceName']
df_province = df_t.drop_duplicates()  # de-duplicate; returns a Series
# df_province = df_t.unique()  # de-duplicate; returns an ndarray
df = pandas.DataFrame()
df_t = dataf['date']
df_date = df_t.drop_duplicates()  # de-duplicate; returns a Series
df_date = df_date.sort_values()
for date_t in df_date:
    for name in df_province:
        print(date_t.strftime('%Y-%m-%d') + name)  # progress output
        df1 = dataf.loc[(dataf['provinceName'].str.contains(name)) & (dataf['date'] == date_t), :]
        df1 = df1.loc[(df1['updateTime'] == df1['updateTime'].max()), :]  # keep only the province's latest rows so earlier city rows cannot produce stray values
        df_t = df1['cityName']
        df_city = df_t.drop_duplicates()  # de-duplicate; returns a Series
        province_confirmedCount = df1['province_confirmedCount'].max()
        province_curedCount = df1['province_curedCount'].max()
        province_deadCount = df1['province_deadCount'].max()
        for city in df_city:
            df2 = df1.loc[(df1['cityName'].str.contains(city)), :]  # rows for a single city
            # Use the day's latest record; comment this line out to use the
            # day's maxima instead.
            df2 = df2.loc[(df2['updateTime'] == df2['updateTime'].max()), :]
            # Output columns are Chinese runtime strings (province/confirmed/
            # cured/dead/city/date); they must stay unchanged.
            new = pandas.DataFrame({'省': name,
                                    '省确诊': province_confirmedCount,
                                    '省治愈': province_curedCount,
                                    '省死亡': province_deadCount,
                                    '市': city,
                                    '确诊': df2['city_confirmedCount'].max(),
                                    '治愈': df2['city_curedCount'].max(),
                                    '死亡': df2['city_deadCount'].max(),
                                    '日期': date_t},
                                   pandas.Index(range(1)))
            # print(new.head())
            df = df.append(new)
# Fill gaps: when a province has no rows for the following day, carry the
# current day's rows forward.
for date_t in df_date:
    # print(date_t.strftime('%Y-%m-%d') + name)  # progress output
    if date_t == df_date.max():  # skip the last day
        continue
    date_add = date_t + timedelta(days=1)
    for name in df_province:
        df1 = df.loc[(df['省'].str.contains(name)) & (df['日期'] == date_t), :]
        if df1.shape[0] > 0:
            df2 = df.loc[
                  (df['省'].str.contains(name)) & (df['日期'] == date_add),
                  :]
            if df2.shape[0] == 0:  # next day's province data missing: copy forward
                print('追加 ' + date_add.strftime('%Y-%m-%d') + name)  # progress output
                for index, data in df1.iterrows():  # update values via the index
                    df1.loc[index, '日期'] = date_add
                df = df.append(df1)
# print(df)
df.to_csv(output_file, encoding="utf_8_sig")  # UTF-8 with BOM for Excel compatibility
|
import copy
from Harmony import Chord
class ChordProg:
    """A chord progression: an ordered list of Chord objects (project-local
    Harmony.Chord) parsed from a comma-separated string."""
    # Delimiter separating chords in a progression string.
    prog_delim = ','
    def __init__(self, chordString: str = "") -> None:
        """Parse *chordString* (may be empty) into self.chords."""
        self.chords: list = []
        self.parse_prog(chordString)
    def add(self, chord: Chord):
        """Append one Chord to the progression."""
        self.chords.append(chord)
    def parse_prog(self, string: str):
        """Split *string* on prog_delim and append each trimmed chunk as a Chord."""
        if len(string) > 0:
            chunks = string.split(ChordProg.prog_delim)
            for cstr in chunks:
                fstr = cstr.strip()
                print(f"chunk: {fstr}")
                chord = Chord(fstr)
                self.add(chord)
    def transpose(self, semitones: int):
        """Transpose every chord in place by *semitones*."""
        for chord in self.chords:
            chord.transpose(semitones)
    def arp(self):
        # NOTE(review): appears unfinished — the deep copy is neither used nor
        # returned. TODO: confirm the intended arpeggiation behavior.
        prog2 = copy.deepcopy(self)
    def __str__(self):
        # Renders as "[Chord Prog]: name1, name2, " (trailing separator kept).
        cstr = ""
        for chord in self.chords:
            cstr += chord.get_chord_name() + ', '
        rstr = f"[Chord Prog]: {cstr}"
        return rstr
|
def application(env, start_response):
    """Minimal WSGI app: always respond 200 OK with a fixed HTML body."""
    status = "200 OK"
    headers = [("Content-Type", "text/html")]
    start_response(status, headers)
    body = [
        b"Hello World from a default Nginx uWSGI Python 3.6 app in a\
 Docker container (default)"
    ]
    return body
|
import pandas as pd
from .well_data import WellInterface
from nl_project.input_layer.get_data import GetProjectData
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from skimage.filters import sobel
class CorePhotoInterface(WellInterface):
    """Interface to work with core photo data"""
    # When True (default), photos are fetched on demand instead of all at init.
    lazy_load = True
    def __init__(self, name):
        """Create the interface for well *name* and attach core photo data.

        The first load may be slow, as photos come from project storage.
        """
        super().__init__(name)
        self.core_photo_metadata = None  # metadata DataFrame, set on load
        self.loaded_photos = {}  # metadata row index -> storage key
        print('WARNING: Loading core photos, may take a long time the first time!')
        self.data = self.get_data(key='Core Photo Data')
    def report_available_core_photos(self) -> pd.DataFrame:
        """Return the metadata table describing the available core photos."""
        return self.core_photo_metadata
def get_sample_at_depth(self, md, sample_thickness=0.08) -> object:
photos = self.get_photos_for_depth_range(md - sample_thickness, md + sample_thickness)
sample_photos = photos['filtered_photos']
top_idx = sample_photos.columns.get_loc('top_depth')
btm_idx = sample_photos.columns.get_loc('bottom_depth')
photo_top = sample_photos.iat[0, top_idx]
photo_btm = sample_photos.iat[sample_photos.shape[0] - 1, btm_idx]
if sample_photos.empty:
print('no core photos for this depth')
return None
else:
photo = photos['interval 1']
photo_thickness = photo_btm - photo_top
px_per_m = photo.shape[0] / photo_thickness
start_pix = int((md - sample_thickness - photo_top) * px_per_m)
end_pix = int((md + sample_thickness - photo_top) * px_per_m)
photo_sample = photo[start_pix:end_pix, 50:350, :]
return photo_sample
def get_photos_for_depth_range(self, start_md, end_md):
start_md_filter = float(int(start_md))
top_filter = self.core_photo_metadata['top_depth'] >= start_md_filter
bottom_filter = self.core_photo_metadata['bottom_depth'] < end_md + 1.0
filtered_photos = self.core_photo_metadata.loc[top_filter & bottom_filter, :]
filtered_photos = filtered_photos.loc[~filtered_photos['top_depth'].duplicated()]
change_points = filtered_photos['top_depth'].diff() > 4.0
list_of_core_changes = list(change_points.loc[change_points == True].index)
list_of_core_changes = [0] + list_of_core_changes
photos_out = {}
interval = 1
for ind, x in enumerate(list_of_core_changes):
if x != list_of_core_changes[-1]:
photos = filtered_photos.loc[x:list_of_core_changes[ind + 1]]
meta = self.core_photo_metadata.loc[x:list_of_core_changes[ind + 1]]
else:
photos = filtered_photos.loc[x:]
meta = self.core_photo_metadata.loc[x:]
photos_out[f'interval {interval}'] = self._clean_up_photos(photos)
photos_out[f'interval {interval} top'] = meta.loc[x, 'top_depth']
photos_out[f'interval {interval} bottom'] = meta.loc[x, 'top_depth']
interval += 1
photos_out['filtered_photos'] = filtered_photos
return photos_out
def _load_from_sources(self, key):
# Load the core metadata table
self.core_photo_metadata = GetProjectData().get_data_of_type('Core Photo Metadata')[0]
if not self.lazy_load:
print('Warning, lazy loading disabled, loading full photo set')
self._load_photos(self.core_photo_metadata)
else:
print('Loading lazily, will only load requested photos')
return self.loaded_photos
def _load_photos(self, photo_info_df):
for idx, row in photo_info_df.iterrows():
if not self.data_exists(key=f'core_photo{idx}'):
self._load_photo( idx, row)
self.loaded_photos[idx] = f'core_photo{idx}'
def _load_photo(self, idx, row):
print(f'Loading Photo at idx: {idx}')
current_photo = GetProjectData().get_data_with_name(name=row['file_key'], download=False)
self.set_data(key=f'core_photo{idx}', data=current_photo)
def _clean_up_photos(self, photos_df):
output_photos = []
for idx, row in photos_df.iterrows():
if not self.data_exists(key=f'core_photo{idx}'):
self._load_photo(idx, row)
current_photo = self.get_data(key=f'core_photo{idx}', load=False)[0]['image_array']
current_photo = current_photo[:, 75:, :]
max_val = 255 * current_photo.shape[0]
max_val_rows = 255 * current_photo.shape[1]
brightness = current_photo[:, :, 0] * 0.21 + current_photo[:, :, 1] * 0.72 + current_photo[:, :, 2] * 0.07
# plt.hist(np.sum(brightness, axis=0)/max_val)
brightness_filter = ((np.sum(brightness, axis=0)/max_val) > 0.3) & ((np.sum(brightness, axis=0)/max_val) < 0.98)
row_brightness_filter = np.sum(brightness, axis=1) / max_val_rows > 0.1
cleaned_photo = current_photo[:, brightness_filter, :]
# Check for header at the top
if np.sum(~row_brightness_filter[:230]) > 60:
cleaned_photo = cleaned_photo[230:, :, :]
cleaned_photo = cleaned_photo[:, :900, :]
output_photos.append(cleaned_photo)
if cleaned_photo.shape[1] < 400:
im = Image.fromarray(cleaned_photo)
im.show()
combined_photo = np.concatenate(output_photos, axis=0)
return combined_photo
|
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
from pandas.api.types import is_datetime64_any_dtype
from pmdarima.arima import auto_arima, ARIMA
def convert_float(rawdata):
    """Return a copy of *rawdata* with numeric columns cast to float64.

    Non-numeric columns are dropped; column order is preserved.
    """
    result = rawdata.copy()
    numeric_cols = [c for c in result.columns if is_numeric_dtype(result[c])]
    for c in numeric_cols:
        result[c] = result[c].astype("float64")
    keep = [dt == "float64" for dt in result.dtypes]
    return result.loc[:, keep].copy()
def estimate_arma(series):
    """Fit and return a stationary ARMA model for *series*.

    NaNs are removed before fitting; order selection is delegated to
    pmdarima's auto_arima with differencing disabled (d=0, D=0).
    """
    clean = pd.Series(series).dropna()
    return auto_arima(
        clean,
        start_p=0,
        d=0,
        start_q=0,
        D=0,
        stationary=True,
        suppress_warnings=True,
        error_action="ignore",
    )
def ragged_fill_series(
    series,
    function=np.nanmean,
    backup_fill_method=np.nanmean,
    est_series=None,
    fitted_arma=None,
    arma_full_series=None,
):
    """Filling in the ragged ends of a series, adhering to the periodicity of the series. If there is only one observation and periodicity cannot be determined, series will be returned unchanged.
    parameters:
        :series: list/pandas Series: the series to fill the ragged edges of. Missings should be np.nans
        :function: the function to fill nas with (e.g. np.nanmean, etc.). Use "ARMA" for ARMA filling
        :backup_fill_method: function: which function to fill ragged edges with in case ARMA can't be estimated
        :est_series: list/pandas Series: optional, the series to calculate the fillna and/or ARMA function on. Should not have nas filled in yet by any method. E.g. a train set. If None, will calculated based on itself.
        :fitted_arma: optional, fitted ARMA model if available to avoid reestimating every time in the `gen_ragged_X` function
        :arma_full_series: optional, for_full_arma_dataset output of `gen_dataset` function. Fitting the ARMA model on the full series history rather than just the series provided
    output:
        :return: tuple of (pandas Series with filled ragged edges, fitted ARMA model or None)
    """
    # Normalize to a pandas Series up front so list inputs work as documented:
    # the index-based operations below (.index, `x in series.index`) would
    # fail on a plain list.
    series = pd.Series(series)
    result = series.copy()
    if est_series is None:
        est_series = result.copy()
    # periodicity of the series, to see which indices to fill in
    nonna_bools = ~pd.isna(series)
    nonna_indices = list(nonna_bools.index[nonna_bools])  # existing indices with values
    # if there is only one non-na observation, can't determine periodicity or
    # position in full series, don't fill anything
    if len(nonna_indices) > 1:
        periodicity = int(
            (
                pd.Series(result[~pd.isna(result)].index)
                - (pd.Series(result[~pd.isna(result)].index)).shift()
            ).mode()[0]
        )  # how often data comes (quarterly, monthly, etc.)
        last_nonna = result.index[result.notna()][-1]
        fill_indices = nonna_indices + [
            int(nonna_indices[-1] + periodicity * i)
            for i in range(1, (len(series) - last_nonna))
        ]  # indices to be filled in, including only the correct periodicity
        fill_indices = [
            x for x in fill_indices if x in series.index
        ]  # cut down on the indices if went too long
        if function == "ARMA":
            # estimate the model if not given
            if fitted_arma is None:
                fitted_arma = estimate_arma(est_series)
            # instantiate model with previously estimated parameters (i.e. on train set)
            arma = ARIMA(order=fitted_arma.order)
            arma.set_params(**fitted_arma.get_params())
            # refit the model on the full series to this point
            if arma_full_series is not None:
                y = list(arma_full_series[~pd.isna(arma_full_series)])
                present = list(result[~pd.isna(result)])
                # limit the series to the point where actuals are
                end_index = 0
                for i in range(len(present), len(y) + 1):
                    if list(y[(i - len(present)) : i]) == list(present):
                        end_index = i
                y = y[:end_index]
            # refit model on just this series
            else:
                y = list(result[~pd.isna(result)])  # refit the model on data
                present = y.copy()
            # can fail if not enough datapoints for order of ARMA process
            try:
                arma.fit(y, error_action="ignore")
                preds = arma.predict(n_periods=int(len(series) - last_nonna))
                fills = list(present) + list(preds)
                fills = fills[: len(fill_indices)]
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are no longer swallowed; fall back to the simple fill value.
                fills = list(result[~pd.isna(result)]) + [
                    backup_fill_method(est_series)
                ] * (len(series) - last_nonna)
                fills = fills[: len(fill_indices)]
            result[fill_indices] = fills
        else:
            fills = list(result[~pd.isna(result)]) + [function(est_series)] * (
                len(series) - last_nonna
            )
            fills = fills[: len(fill_indices)]
            result[fill_indices] = fills
    return result, fitted_arma
def gen_dataset(
    rawdata,
    target_variable,
    fill_na_func=np.nanmean,
    fill_ragged_edges=None,
    fill_na_other_df=None,
    arma_full_df=None,
):
    """Intermediate step to generate a raw dataset the model will accept
    Input should be a pandas dataframe of of (n observations) x (m features + 1 target column). Non-numeric columns will be dropped. Missing values should be `np.nan`s.
    The data should be fed in in the time of the most granular series. E.g. 3 monthly series and 2 quarterly should be given as a monthly dataframe, with NAs for the two intervening months for the quarterly variables. Apply the same logic to yearly or daily variables.
    parameters:
        :rawdata: pandas DataFrame: n x m+1 dataframe
        :target_variable: str: name of the target variable column
        :fill_na_func: function: function to replace within-series NAs. Given the column, the function should return a scalar.
        :fill_ragged_edges: function to replace NAs in ragged edges (data missing at end of series). Pass "ARMA" for ARMA filling
        :fill_na_other_df: pandas DataFrame: A dataframe with the exact same columns as the rawdata dataframe. For use with filling NAs based on a different dataset (e.g. the train dataset). E.g. `train=LSTM(...)`, `gen_dataset(test_data, target_variable, fill_na_other_df=train.data)`
        :arma_full_df: pandas DataFrame: A dataframe with the exact same columns as the rawdata dataframe. For use with ARMA filling on a full-series history, rather than just the history present in the train set
    output:
        :return: Dict of numpy arrays: n x m+1 arrays.
            na_filled_dataset: NA filled dataset the model will be trained on
            for_ragged_dataset: dataset with NAs maintained, for knowing periodicity in `gen_ragged_X` function
            for_full_arma_dataset: optional, full dataset for calculating ARMA on full history of series
            other_dataset: other dataset (i.e. train) on which to base NA filling
            arma_models: list of fitted ARMA models
    """
    # Default: fill ragged edges the same way as within-series NAs.
    if fill_ragged_edges is None:
        fill_ragged_edges = fill_na_func
    # if there is no date column, raise an error
    if not (any([is_datetime64_any_dtype(rawdata[x]) for x in rawdata.columns])):
        raise Exception(
            "No column of date datatype. Make sure to convert your date column or add parse_dates parameter to pandas.read_csv"
        )
    # Keep the datetime column(s) aside; convert_float drops them below.
    date_series = rawdata[
        [
            column
            for column in rawdata.columns
            if is_datetime64_any_dtype(rawdata[column])
        ]
    ]
    rawdata = convert_float(rawdata)
    # to get fill_na values based on either this dataframe or another (training)
    if fill_na_other_df is None:
        fill_na_df = rawdata.copy()
    else:
        fill_na_df = convert_float(fill_na_other_df)
    variables = list(
        rawdata.columns[rawdata.columns != target_variable]
    )  # features, excluding target variable
    # fill NAs with a function
    for_ragged = rawdata.copy()  # needs to be kept for generating ragged data
    arma_models = []  # initializing fitted ARMA models
    for col in rawdata.columns[
        rawdata.columns != target_variable
    ]:  # leave target as NA
        # ragged edges; ragged_fill_series returns (filled_series, fitted_arma_or_None)
        if arma_full_df is not None:
            filled_series = ragged_fill_series(
                rawdata[col],
                function=fill_ragged_edges,
                est_series=fill_na_df[col],
                arma_full_series=arma_full_df[col],
            )
        else:
            filled_series = ragged_fill_series(
                rawdata[col], function=fill_ragged_edges, est_series=fill_na_df[col]
            )
        rawdata[col] = filled_series[0]
        arma_models.append(filled_series[1])
        # within-series missing
        rawdata[col] = rawdata[col].fillna(fill_na_func(fill_na_df[col]))
    # drop any rows still with missing X data, in case fill_na_func doesn't get full coverage
    rawdata = rawdata.loc[rawdata.loc[:, variables].dropna().index, :].reset_index(
        drop=True
    )
    # NOTE(review): this reuses rawdata's (already reset) index to filter
    # for_ragged — confirm the two frames stay row-aligned at this point.
    for_ragged = for_ragged.loc[
        rawdata.loc[:, variables].dropna().index, :
    ].reset_index(drop=True)
    # returning array, target variable at the end
    def order_dataset(rawdata, variables, target_variable):
        # Stack each feature as an (n, 1) column, then append the target
        # as the final column of the output array.
        data_dict = {}
        for variable in variables:
            data_dict[variable] = rawdata.loc[:, variable].values
            data_dict[variable] = data_dict[variable].reshape(
                (len(data_dict[variable]), 1)
            )
        target = rawdata.loc[:, target_variable].values
        target = target.reshape((len(target), 1))
        dataset = np.hstack(([data_dict[k] for k in data_dict] + [target]))
        return dataset
    # final datasets
    dataset = order_dataset(rawdata, variables, target_variable)
    for_ragged_dataset = order_dataset(for_ragged, variables, target_variable)
    if arma_full_df is not None:
        for_arma_full = order_dataset(
            convert_float(arma_full_df), variables, target_variable
        )
    else:
        for_arma_full = None
    fill_na_other = order_dataset(fill_na_df, variables, target_variable)
    # No ARMA filling was used if the first entry is None (all entries are
    # produced the same way, so checking [0] suffices).
    # NOTE(review): assumes at least one feature column exists; arma_models[0]
    # raises IndexError otherwise.
    if arma_models[0] is None:
        arma_models = None
    return {
        "na_filled_dataset": dataset,
        "for_ragged_dataset": for_ragged_dataset,
        "for_full_arma_dataset": for_arma_full,
        "other_dataset": fill_na_other,
        "date_series": date_series,
        "arma_models": arma_models,
    }
def gen_model_input(dataset, n_timesteps, drop_missing_ys=True):
    """Final step in generating a dataset the model will accept
    Input should be output of the `gen_dataset` function. Creates two series, X for input and y for target.
    y is a one-dimensional np array equivalent to a list of target values.
    X is an n x n_timesteps x m matrix.
    Essentially the input data for each test observation becomes an n_steps x m matrix instead of a single row of data. In this way the LSTM network can learn from each variables past, not just its current value.
    Observations that don't have enough n_steps history will be dropped.
    parameters:
        :dataset: numpy array: n x m+1 array
        :n_timesteps: int: how many historical periods to consider when training the model. For example if the original data is monthly, n_timesteps=12 would consider data for the last year.
        :drop_missing_ys: boolean: whether or not to filter out missing ys. Set to true when creating training data, false when want to run predictions on data that may not have a y.
    output:
        :return: numpy tuple of:
            X: `n_obs x n_timesteps x n_features`
            y: `n_obs`
    """
    windows, targets = [], []
    total = len(dataset)
    for start in range(total):
        stop = start + n_timesteps
        # stop once a full window no longer fits inside the dataset
        if stop > total:
            break
        # features are all but the last column; target is the last column of
        # the final row of the window
        windows.append(dataset[start:stop, :-1])
        targets.append(dataset[stop - 1, -1])
    X = np.array(windows)
    y = np.array(targets)
    if drop_missing_ys:
        # observations without a target carry no training signal
        keep = ~pd.isna(y)
        X = X[keep, :, :]
        y = y[keep]
    return X, y
def gen_ragged_X(
    X,
    pub_lags,
    lag,
    for_ragged_dataset,
    target_variable,
    fill_ragged_edges=np.nanmean,
    backup_fill_method=np.nanmean,
    other_dataset=None,
    for_full_arma_dataset=None,
    arma_models=None,
    dates=None,
    start_date=None,
    end_date=None,
):
    """Produce vintage model inputs given the period lag of different variables, for use when testing historical performance (model evaluation, etc.)
    parameters:
        :X: numpy array: n x m+1 array, second output of `gen_model_input` function, `for_ragged_dataset`, passed through the `gen_model_input` function
        :pub_lags: list[int]: list of periods back each input variable is set to missing. I.e. publication lag of the variable.
        :lag: int: simulated periods back, interpretable as last complete period relative to target period. E.g. -2 = simulating data as it would have been 1 month before target period, i.e. 2 months ago is last complete period.
        :for_ragged_dataset: numpy array: the original full ragged dataset, output of `gen_dataset` function, `for_ragged_dataset`
        :target_variable: str: the target variable of this dataset
        :fill_ragged_edges: function: which function to fill ragged edges with, "ARMA" for ARMA model
        :backup_fill_method: function: which function to fill ragged edges with in case ARMA can't be estimated. Should be the same as originally passed to `gen_dataset` function
        :other_dataset: numpy array: other dataframe from which to calculate the fill NA values, i.e. a training dataset. Output of `gen_dataset` function, `other_dataset`
        :for_full_arma_dataset: numpy array: data to fit the ARMA model on
        :dates: pandas Series: list of dates for the data
        :start_date: str in "YYYY-MM-DD" format: start date of generating ragged preds. To save calculation time, i.e. just calculating after testing date instead of all dates
        :end_date: str in "YYYY-MM-DD" format: end date of generating ragged preds
    output:
        :return: numpy array equivalent in shape to X input, but with trailing edges set to NA then filled
    """
    # to get fill_na values based on either this dataframe or another (training)
    if other_dataset is None:
        fill_na_dataset = for_ragged_dataset
    else:
        fill_na_dataset = other_dataset
    # if no ragged edges fill provided, just do same as backup method
    if fill_ragged_edges is None:
        fill_ragged_edges = backup_fill_method
    # if no dates given, use a constant so every observation passes the
    # date-range check below
    if dates is None:
        dates = [0] * X.shape[0]
        start_date = 0
        end_date = 0
    # clearing ragged data; np.array(X) copies so the caller's X is untouched
    X_ragged = np.array(X)
    for obs in range(X_ragged.shape[0]): # go through every observation
        # only do if within desired date range
        if (dates[obs] >= start_date) & (dates[obs] <= end_date):
            for var in range(
                len(pub_lags)
            ): # every variable (and its corresponding lag)
                # blank out the last (pub_lag - lag) timesteps of this variable
                # to simulate data that had not yet been published
                for ragged in range(
                    1, pub_lags[var] + 1 - lag
                ): # setting correct lags (-lag because input -2 for -2 months, so +2 additional months of lag)
                    X_ragged[
                        obs, X_ragged.shape[1] - ragged, var
                    ] = np.nan # setting to missing data
                if fill_ragged_edges == "ARMA":
                    # NOTE(review): the ARMA branches index arma_models[var];
                    # passing fill_ragged_edges="ARMA" with arma_models=None
                    # would raise TypeError — confirm callers always supply
                    # the fitted models in that case.
                    # pass the full ARMA series if available
                    if for_full_arma_dataset is None:
                        X_ragged[obs, :, var] = ragged_fill_series(
                            pd.Series(X_ragged[obs, :, var]),
                            function=fill_ragged_edges,
                            backup_fill_method=backup_fill_method,
                            est_series=fill_na_dataset[:, var],
                            fitted_arma=arma_models[var],
                        )[0]
                    else:
                        X_ragged[obs, :, var] = ragged_fill_series(
                            pd.Series(X_ragged[obs, :, var]),
                            function=fill_ragged_edges,
                            backup_fill_method=backup_fill_method,
                            est_series=fill_na_dataset[:, var],
                            fitted_arma=arma_models[var],
                            arma_full_series=for_full_arma_dataset[:, var],
                        )[0]
                else:
                    X_ragged[obs, :, var] = ragged_fill_series(
                        pd.Series(X_ragged[obs, :, var]),
                        function=fill_ragged_edges,
                        est_series=fill_na_dataset[:, var],
                    )[0]
                # backstop: any NAs the ragged fill left behind get the
                # simple fill value
                X_ragged[obs, :, var] = pd.Series(X_ragged[obs, :, var]).fillna(
                    backup_fill_method(fill_na_dataset[:, var])
                )
    return X_ragged
|
from FreeTAKServer.model.FTSModel.fts_protocol_object import FTSProtocolObject
class ChecklistTasks(FTSProtocolObject):
@staticmethod
def Checklist():
checklistTasks = ChecklistTasks()
return checklistTasks
|
import turtle
import math
import weakref
import threading
import sys
from pylogo.common import *
from ide import add_command, get_canvas
class Turtle:
    """A Logo turtle drawing on the shared IDE canvas.

    Drawing operations are queued through ``add_command`` so they run on the
    GUI thread; ``get_canvas().update`` is queued whenever the screen should
    repaint.
    """

    # Weak references to every turtle ever created; allturtles() filters
    # out dead references.
    _all_turtles = []
    # Class-wide counter used to give each turtle a unique repr number.
    _turtle_count = 1

    def __init__(self):
        self.pen = turtle.RawPen(get_canvas())
        self.pen.degrees()
        self._all_turtles.append(weakref.ref(self))
        self._count = self._turtle_count
        self.__class__._turtle_count += 1

    def __repr__(self):
        return '<%s %i>' % (self.__class__.__name__,
                            self._count)

    @logofunc()
    def turtle(self):
        """Return this turtle (lets Logo code reference the current actor)."""
        return self

    @logofunc(aliases=['fd'])
    def forward(self, v):
        """Move forward v units and refresh the canvas."""
        add_command(self.pen.forward, v)
        add_command(get_canvas().update)

    @logofunc(aliases=['back', 'bk'])
    def backward(self, v):
        """Move backward v units and refresh the canvas."""
        # Fixed: queue the move and the refresh as two separate commands.
        # add_command returns None, so the previous chained call
        # `add_command(...).add_command(...)` raised AttributeError
        # (compare forward()).
        add_command(self.pen.backward, v)
        add_command(get_canvas().update)

    @logofunc(aliases=['lt'])
    def left(self, v):
        """Turn left v degrees."""
        add_command(self.pen.left, v)

    @logofunc(aliases=['rt'])
    def right(self, v):
        """Turn right v degrees."""
        add_command(self.pen.right, v)

    @logofunc(aliases=['pu'])
    def penup(self):
        """Lift the pen so subsequent moves do not draw."""
        add_command(self.pen.up)

    @logofunc(aliases=['pd'])
    def pendown(self):
        """Lower the pen so subsequent moves draw."""
        add_command(self.pen.down)

    @logofunc(aware=True)
    def penwidth(self, v):
        """Set the pen line width."""
        add_command(self.pen.width, v)

    @logofunc(aliases=['pc', 'color'],
              arity=1)
    def pencolor(self, *args):
        """Set the pen color (any form accepted by turtle.RawPen.color)."""
        add_command(self.pen.color, *args)

    @logofunc(aliases=['ht'])
    def hideturtle(self):
        """Hide the turtle cursor (tracer off)."""
        add_command(self.pen.tracer, 0)

    @logofunc(aliases=['st'])
    def showturtle(self):
        """Show the turtle cursor (tracer on)."""
        add_command(self.pen.tracer, 1)

    @logofunc(aliases=['turtleprint', 'turtlepr'], arity=1)
    def turtlewrite(self, text, move=False):
        """Write text at the turtle position; lists are joined with spaces."""
        if isinstance(text, list):
            text = ' '.join(map(str, text))
        else:
            text = str(text)
        add_command(self.pen.write, text, move)
        add_command(get_canvas().update)

    @logofunc()
    def startfill(self):
        """Begin collecting a filled polygon."""
        add_command(self.pen.fill, 1)

    @logofunc()
    def endfill(self):
        """Close and paint the polygon started by startfill."""
        add_command(self.pen.fill, 0)
        add_command(get_canvas().update)

    @logofunc()
    def setxy(self, x, y):
        """Move to absolute position (x, y)."""
        add_command(self.pen.goto, x, y)
        add_command(get_canvas().update)

    @logofunc()
    def setx(self, x):
        """Move horizontally to x, keeping the current y."""
        t = self.pen
        add_command(t.goto, x, t.position()[1])
        add_command(get_canvas().update)

    @logofunc()
    def sety(self, y):
        """Move vertically to y, keeping the current x."""
        t = self.pen
        add_command(t.goto, t.position()[0], y)
        add_command(get_canvas().update)

    @logofunc()
    def posx(self):
        """Return the current x coordinate."""
        return self.pen.position()[0]

    @logofunc()
    def posy(self):
        """Return the current y coordinate."""
        return self.pen.position()[1]

    @logofunc()
    def heading(self):
        """Return the current heading in degrees."""
        return self.pen.heading()

    @logofunc()
    def setheading(self, v):
        """Set the heading to v degrees."""
        add_command(self.pen.setheading, v)

    @logofunc()
    def home(self):
        """Return to the origin, facing heading 0."""
        add_command(self.pen.setheading, 0)
        add_command(self.pen.goto, 0, 0)
        add_command(get_canvas().update)

    @logofunc(aliases=['cs', 'clearscreen'])
    def clear(self):
        """Go home and erase everything this turtle has drawn."""
        self.home()
        add_command(self.pen.clear)
        add_command(get_canvas().update)

    @logofunc(arity=1)
    def distance(self, other, orig=None):
        """Return the Euclidean distance between `other` and `orig`.

        `orig` defaults to this turtle's pen.
        NOTE(review): `other` must expose a .position() method (e.g. a pen),
        not a Turtle instance — confirm the expected argument type.
        """
        if orig is None:
            orig = self.pen
        return math.sqrt((orig.position()[0]-other.position()[0])**2 +
                         (orig.position()[1]-other.position()[1])**2)

    @logofunc(aware=True)
    def clone(self, interp):
        """Create a new turtle of the same class and return it.

        Fixed: the clone was previously created but never returned, so the
        Logo `clone` command always yielded nothing.
        """
        new = self.__class__()
        return new
@logofunc()
def allturtles():
    """Return every Turtle instance that is still alive (dead weakrefs skipped)."""
    live = []
    for ref in Turtle._all_turtles:
        inst = ref()
        if inst is not None:
            live.append(inst)
    return live
@logofunc(aware=True)
def createturtle(interp):
    """Create a fresh Turtle and push it as the interpreter's active actor."""
    fresh = Turtle()
    interp.push_actor(fresh)
|
import os
import torch
from torchvision.datasets import Kinetics400
def getkinetics(datafolder,tempfolder,categorylist,frames_per_instance,reallabel,frame_skip=1,centercrop=None):
    # Build (video, audio, label) chunks from a Kinetics-format folder.
    # NOTE(review): this function is in a half-finished/debugging state — the
    # chunking loop is a commented-out string block and exit() is called
    # unconditionally inside the first loop iteration. Confirm before use.
    # TODO
    # for category in categorylist:
    # os.system("mv "+datafolder+"/"+category+" "+tempfolder)
    a = Kinetics400(tempfolder,300,extensions=('mp4',))
    datas=[]
    print("Total videos: "+str(len(a)))
    for ii in range(len(a)):
        (video,audio,label)=a[ii]
        vh=len(video[0])
        vw=len(video[0][0])
        # Keep every frame_skip-th frame.
        # Assumes the video tensor is (T, H, W, 3) — TODO confirm.
        v=video.view(-1,frame_skip,vh,vw,3)[:,0,:,:,:].squeeze()
        if centercrop is not None:
            # Centre-crop to (w, h); skip videos smaller than the crop.
            w,h=centercrop
            if(w>vw)or(h>vh):
                continue
            hstart=(vh-h)//2
            hend=hstart+h
            wstart=(vw-w)//2
            wend=wstart+w
            v=v[:,hstart:hend,wstart:wend,:]
        alen=len(audio[0]) # TODO this is wrong, should be 1
        print(len(v))
        '''
        ap=alen*frames_per_instance*frame_skip//300
        for i in range(len(v)//frames_per_instance):
            vi=v[i*frames_per_instance:(i+1)*frames_per_instance]
            alen=len(audio[0])
            ai=audio[ap*i:ap*(i+1),:]
            print(vi.shape)
            print(ai.shape)
            datas.append((vi,ai,reallabel))
        '''
        # NOTE(review): unconditional exit() — everything below is unreachable.
        exit()
    # Move the category folders back out of the temp folder.
    for category in categorylist:
        os.system("mv "+tempfolder+"/"+category+" "+datafolder)
    return datas
def getdata(datalist,splitsize=50):
    # Copy each category's files into a scratch 'zemp' folder in batches of
    # `splitsize`, convert them with getkinetics, and save a tensor per batch.
    # NOTE(review): exit() after the first getkinetics call makes torch.save
    # and the cleanup below unreachable — debugging leftover, confirm intent.
    catacount = 0
    trainhome = '/home/pliang/yiwei/kinetics/ActivityNet/Crawler/Kinetics/test_data/'
    zemp_dir = trainhome+'zemp/'
    if not os.path.exists(zemp_dir):
        os.makedirs(zemp_dir)
    for category in datalist:
        files = os.listdir(trainhome+category)
        # ceil(len(files) / splitsize) batches
        for i in range((len(files)-1)//splitsize+1):
            for j in range(0,splitsize):
                if i*splitsize+j >= len(files):
                    break
                os.system('cp -r '+trainhome+category+'/'+files[i*splitsize+j]+' '+zemp_dir)
                # os.system('mv '+trainhome+category+'/'+files[i*splitsize+j]+' '+trainhome+'zemp/')
            # a=getkinetics(trainhome,trainhome,['zemp'],150,catacount,2,(224,224))
            a=getkinetics(trainhome,'/home/pliang/yiwei/kinetics/ActivityNet/Crawler/Kinetics/temp',['zemp'],150,catacount,2,(224,224))
            exit()
            # Unreachable while exit() above remains.
            torch.save(a,'/data/yiwei/kinetics_small/test/'+category+str(i)+'.pt')
            os.system('mv '+trainhome+'zemp/* '+trainhome+category)
        catacount += 1
# Script entry point: process these five Kinetics categories.
getdata(['archery','breakdancing','crying','dining','singing'])
|
##
import requests, time, smtplib, logging, datetime
## CONFIGURE YOUR GEO-LOCATION FOR BETTER AND ACCURATE RESULTS
# find your coordinates at "latlong.net"
# enter your email and passord to get an email notification (optional)
# DONT FORGET TO ENTER YOUR COORDINATES...THOSE ENTERED ARE AN EXAMPLE !!
# User configuration: notification credentials and observer coordinates.
info = {
    # Sender/recipient email credentials (optional — used by notify_me).
    'email':' ',
    'password':"",
    # Observer coordinates as strings (replace with your own; see latlong.net).
    # NOTE(review): the latitude value has a leading space — float() tolerates
    # it, but confirm the value itself is intended.
    'cood':{
        'longitude':'35.1466',
        'latitude':' -0.671895',
    },
}
# Public APIs: current ISS position and sunrise/sunset times for a location.
ISS_position_api = "http://api.open-notify.org/iss-now.json"
sunrise_sunset_api = "https://api.sunrise-sunset.org/json"
# Log notifications and errors to data.log.
logging.basicConfig(format = "%(levelname)s %(asctime)s %(message)s",filename = "data.log", level = logging.INFO)
class ISS_LOOKOUT:
    """Poll the ISS position and email the user when it is overhead at night."""

    def get_ISS_pos(self):
        """Return the current ISS (latitude, longitude) as strings."""
        response = requests.get(ISS_position_api)
        response.raise_for_status()
        data = response.json()
        latitude = data['iss_position']["latitude"]
        longitude = data['iss_position']['longitude']
        iss_position = (latitude, longitude)
        return iss_position

    def is_it_overhead(self):
        """Return True when the ISS is within +/-5 degrees of the configured position."""
        iss_pos = self.get_ISS_pos()
        lat = float(iss_pos[0])
        lng = float(iss_pos[1])
        my_lat = float(info['cood']['latitude'])
        my_long = float(info['cood']['longitude'])
        return (my_lat - 5 <= lat <= my_lat + 5) and (my_long - 5 <= lng <= my_long + 5)

    def is_it_dark(self):
        """Return True when the current hour is before sunrise or after sunset."""
        parameters = {
            'lat': info['cood']['latitude'],
            # Fixed: 'lng' was previously populated from the latitude entry,
            # so sunrise/sunset were computed for the wrong location.
            'lng': info['cood']['longitude'],
            'formatted': 0,
        }
        response = requests.get(sunrise_sunset_api, params=parameters)
        response.raise_for_status()
        data = response.json()
        # formatted=0 returns ISO-8601 timestamps; take the hour field.
        sunrise = int(data['results']['sunrise'].split("T")[1].split(":")[0])
        sunset = int(data['results']['sunset'].split("T")[1].split(":")[0])
        time_now = datetime.datetime.now().hour
        # NOTE(review): the local hour is compared against the API's UTC
        # hours — confirm this is acceptable for the configured location.
        return sunrise >= time_now or sunset <= time_now

    def notify_me(self):
        """Log the sighting and (best-effort) email the configured address."""
        logging.info("ISS is overhead !")
        try:
            # send yourself an email
            connection = smtplib.SMTP('smtp.gmail.com')
            connection.starttls()
            connection.login(info['email'], info['password'])
            connection.sendmail(
                from_addr = info['email'],
                to_addrs = info['email'],
                msg = """Subject:LOOK UP !
            THE ISS IS NOW VISIBLE...LOOK uP!!!
            """)
            connection.quit()
        except Exception as err:
            # Narrowed from a bare `except: pass`; still best-effort, but the
            # failure is now visible in data.log (e.g. bad credentials).
            logging.error("email notification failed: %s", err)

    def run(self):
        """Poll forever; notify when the ISS is overhead during darkness."""
        while True:
            if self.is_it_dark() and self.is_it_overhead():
                self.notify_me()
            # Throttle the loop — the `time` import was otherwise unused and
            # an undelayed loop would hammer both public APIs.
            time.sleep(60)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.