content stringlengths 5 1.05M |
|---|
def calculate(a, b):
    """Return the pair (a + b, a - b)."""
    total = a + b
    difference = a - b
    return total, difference
# Demo: unpack the (sum, difference) tuple returned by calculate().
res = calculate(20, 10)
print(f'The sum is {res[0]} and the difference is {res[1]}')
|
import logging
from typing import Optional
import pykube
import requests
import urllib3.exceptions
logger = logging.getLogger(__name__)
# Set in login(), consumed in get_pykube_cfg() and all API calls.
_pykube_cfg: Optional[pykube.KubeConfig] = None
class LoginError(Exception):
    """ Raised when the operator cannot login to the API. """
class AccessError(Exception):
    """ Raised when the operator cannot access the cluster API. """
def login(verify: bool = False) -> None:
    """
    Log in to the Kubernetes cluster, locally or remotely.

    The logged-in state / config objects are kept in module-level globals so
    they are available to later calls through the same functions. Automatic
    refresh/reload of tokens or config objects also belongs here.

    :param verify: when True, each login is immediately verified.
    """
    # Pykube login is mandatory; the framework cannot run without it.
    try:
        import pykube  # noqa: F401 -- availability check only
    except ImportError:
        raise  # mandatory
    login_pykube(verify=verify)

    # The official client library is only auto-logged-in because that was the
    # implied behaviour before the switch to pykube -- kept so (implied).
    client_available = True
    try:
        import kubernetes  # noqa: F401 -- availability check only
    except ImportError:
        client_available = False  # optional
    if client_available:
        login_client(verify=verify)
def login_pykube(verify: bool = False) -> None:
    """
    Authenticate pykube: in-cluster service account first, kubeconfig second.

    The resulting config is stored in the module-global ``_pykube_cfg`` for
    later use by get_pykube_cfg()/get_pykube_api().

    :param verify: when True, immediately verify API access via verify_pykube().
    :raises LoginError: if neither authentication source works.
    """
    global _pykube_cfg
    try:
        _pykube_cfg = pykube.KubeConfig.from_service_account()
        logger.debug("Pykube is configured in cluster with service account.")
    except FileNotFoundError:
        # Not running in a pod: fall back to the developer's kubeconfig.
        try:
            _pykube_cfg = pykube.KubeConfig.from_file()
            logger.debug("Pykube is configured via kubeconfig file.")
        except (pykube.PyKubeError, FileNotFoundError):
            # Fixed: was an f-string with no placeholders (useless f prefix).
            raise LoginError("Cannot authenticate pykube neither in-cluster, nor via kubeconfig.")
    if verify:
        verify_pykube()
def login_client(verify: bool = False) -> None:
    """
    Authenticate the official kubernetes client library.

    Tries the in-cluster config (env vars + mounted service account) first,
    then the developer's kubeconfig files.

    :param verify: when True, immediately verify API access via verify_client().
    :raises LoginError: if neither authentication source works.
    """
    import kubernetes.client
    try:
        kubernetes.config.load_incluster_config()  # cluster env vars
        logger.debug("Client is configured in cluster with service account.")
    except kubernetes.config.ConfigException:
        try:
            kubernetes.config.load_kube_config()  # developer's config files
            logger.debug("Client is configured via kubeconfig file.")
        except kubernetes.config.ConfigException as e:
            # Fixed: was an f-string with no placeholders; also chain the
            # cause explicitly so the original ConfigException is preserved.
            raise LoginError("Cannot authenticate client neither in-cluster, nor via kubeconfig.") from e
    if verify:
        verify_client()
def verify_pykube() -> None:
    """
    Verify if login has succeeded, and the access configuration is still valid.
    All other errors (e.g. 403, 404) are ignored: it means, the host and port
    are configured and are reachable, the authentication token is accepted,
    and the rest are authorization or configuration errors (not a showstopper).

    :raises AccessError: on connection failure or a 401 (unauthenticated).
    """
    try:
        api = get_pykube_api()
        # Probe the API root: any 2xx proves connectivity and a valid token.
        rsp = api.get(version="", base="/")
        rsp.raise_for_status()
        api.raise_for_status(rsp)  # replaces requests's HTTPError with its own.
    except requests.exceptions.ConnectionError as e:
        raise AccessError("Cannot connect to the Kubernetes API. "
                          "Please configure the cluster access.")
    except pykube.exceptions.HTTPError as e:
        # pykube's own HTTPError carries the status code in `.code`.
        if e.code == 401:
            raise AccessError("Cannot authenticate to the Kubernetes API. "
                              "Please login or configure the tokens.")
    except requests.exceptions.HTTPError as e:
        # Raised by rsp.raise_for_status() if pykube did not wrap it first.
        if e.response.status_code == 401:
            raise AccessError("Cannot authenticate to the Kubernetes API. "
                              "Please login or configure the tokens.")
def verify_client() -> None:
    """
    Check that the official client's login works and the cluster is reachable.

    Only connectivity failures and 401 (unauthenticated) are escalated to
    AccessError; other HTTP errors (403, 404, ...) mean the host, port and
    token are fine, so they are deliberately ignored.
    """
    import kubernetes.client.rest
    try:
        kubernetes.client.CoreApi().get_api_versions()
    except urllib3.exceptions.HTTPError:
        raise AccessError("Cannot connect to the Kubernetes API. "
                          "Please configure the cluster access.")
    except kubernetes.client.rest.ApiException as err:
        if err.status == 401:
            raise AccessError("Cannot authenticate to the Kubernetes API. "
                              "Please login or configure the tokens.")
def get_pykube_cfg() -> pykube.KubeConfig:
    """Return the config stored by login_pykube(), or fail loudly."""
    cfg = _pykube_cfg
    if cfg is None:
        raise LoginError("Not logged in with PyKube.")
    return cfg
# TODO: add some caching, but keep kwargs in mind. Maybe add a key= for purpose/use-place?
def get_pykube_api(
        timeout: Optional[float] = None,
) -> pykube.HTTPClient:
    """Build a pykube HTTP client over the stored login config."""
    return pykube.HTTPClient(get_pykube_cfg(), timeout=timeout)
|
from functools import partial
def setupG5x(parent):
    """Wire the G5x keypad, preset and backspace widgets to their handlers."""
    keypad_handler = partial(g5xKeypad, parent)
    preset_handler = partial(g5xPreset, parent)
    bksp_handler = partial(g5xBackSpace, parent)
    parent.g5xBtnGrp.buttonClicked.connect(keypad_handler)
    parent.g5xPresetGrp.buttonClicked.connect(preset_handler)
    parent.g5xBkspBtn.clicked.connect(bksp_handler)
def g5xKeypad(parent, button):
    """Append the pressed keypad character to the G5x offset label."""
    char = str(button.text())
    current = parent.g5xOffsetLbl.text() or 'null'
    # 'null' marks an empty label: start fresh instead of appending.
    new_text = char if current == 'null' else current + char
    parent.g5xOffsetLbl.setText(new_text)
def g5xPreset(parent, button):
    """Replace the offset label text with the preset button's caption."""
    preset_value = str(button.text())
    parent.g5xOffsetLbl.setText(preset_value)
def g5xBackSpace(parent):
    """Delete the last character of the offset label; no-op when empty."""
    current = parent.g5xOffsetLbl.text()
    if current:
        parent.g5xOffsetLbl.setText(current[:-1])
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2020-01-23 18:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter IngestJob.status choices.

    NOTE(review): presumably this adds the (5, 'Completing') choice --
    confirm against migration 0007.
    """

    dependencies = [
        ('bossingest', '0007_auto_20190627_2236'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ingestjob',
            name='status',
            # Stored as an integer; the labels are for forms/admin display only.
            field=models.IntegerField(choices=[(0, 'Preparing'), (1, 'Uploading'), (2, 'Complete'), (3, 'Deleted'), (4, 'Failed'), (5, 'Completing')], default=0),
        ),
    ]
|
class Controller(object):
    """Abstract base for input controllers mapping named actions to bindings."""

    def add(self, name, *bindings):
        '''
        Adds an action to be checked.
        name: The name for the action.
        bindings: The binding or sequence of bindings that trigger this action.
        '''
        raise NotImplementedError('call to abstract method ' + repr(self.add))

    def check(self, name, events):
        '''
        Returns if any of the action keys are held down. With this function you can check
        to see if a key is held down or not. Unlike the 'check_pressed' or 'check_released'
        methods which are only triggered once when a key is pressed or released, this
        function is triggered every step that the key is held down for.
        name: The name for the action.
        events: The list of events.
        '''
        raise NotImplementedError('call to abstract method ' + repr(self.check))

    def check_pressed(self, name, events):
        '''
        Returns if any of the action keys has just been pressed. With this function you can
        check to see if a key has been pressed or not. Unlike the 'check' method, this
        function will only run once for every time the key is pressed down, so for it to
        trigger again, the key must be first released and then pressed again.
        name: The name for the action.
        events: The list of events.
        '''
        raise NotImplementedError('call to abstract method ' + repr(self.check_pressed))

    def check_released(self, name, events):
        '''
        Returns if any of the action keys has just been released. With this function you can
        check to see if a key has been released or not. Unlike the 'check' function, this
        function will only run once for every time the key is lifted, so for it to trigger
        again, the key must be first pressed and then released again.
        name: The name for the action.
        events: The list of events.
        '''
        raise NotImplementedError('call to abstract method ' + repr(self.check_released))
class Solution:
    """LeetCode 221: area of the largest all-'1' square in a binary matrix."""

    def maximalSquare(self, matrix: "list[list[str]]") -> int:
        """Return the area of the largest square of '1' cells.

        Fixes two issues in the original:
        * the ``List`` annotation was never imported (NameError at def time);
        * the brute-force expansion was O(n^2 * m^2) -- replaced with the
          standard O(n * m) dynamic programme using a single rolling row.
        """
        if not matrix or not matrix[0]:
            return 0
        rows, cols = len(matrix), len(matrix[0])
        # dp[j] = side of the largest square whose bottom-right corner is at
        # (current row, column j-1); index 0 is a zero sentinel.
        dp = [0] * (cols + 1)
        best = 0
        for i in range(rows):
            prev_diag = 0  # dp value of the top-left neighbour
            for j in range(1, cols + 1):
                saved = dp[j]
                if matrix[i][j - 1] == "1":
                    # A square here extends the smallest of the three
                    # neighbouring squares (left, top, top-left) by one.
                    dp[j] = min(dp[j], dp[j - 1], prev_diag) + 1
                    best = max(best, dp[j])
                else:
                    dp[j] = 0
                prev_diag = saved
        return best * best
|
import unittest
from flaskMathApp.factorial.service import factorialService
class FactorialServicetestCase(unittest.TestCase):
    """Tests for factorialService input sanitising and calculation."""

    def test_sanitize_inputs_success(self):
        # A valid numeric input must produce a (non-None) result.
        result = factorialService.sanitizeAndCalculate(1)
        self.assertIsNotNone(result)

    def test_sanitize_inputs_fail(self):
        # A non-numeric input is rejected with None rather than an exception.
        result = factorialService.sanitizeAndCalculate('test')
        self.assertIsNone(result)

    def test_correct_calculation(self):
        # BUG FIX: this previously asserted assertEqual(1, 1), which could
        # never fail. Assert on the actual result instead (1! == 1).
        result = factorialService.calculate(1)
        self.assertEqual(result, 1)
import numpy as np
"""
Exposure functions for a ground based UHECR observatory.
The exposure is calculated using the method described in
Sommers, P., 2000. Cosmic Ray Anisotropy Analysis with a Full-Sky Observatory. arXiv.org.
Here, the exposure functions are written in terms of the spherical
coordinate theta, where theta = pi/2 - declination and theta [pi, 0], dec [-pi/2, pi/2].
This expression is for convenience when translating to the Stan run code in which
everything takes place on the unit sphere.
For completeness, the exposure as a function of declination is also included
with _dec appended to the function name.
@author Francesca Capel
@date June 2018
"""
'''
For reference:
p[0] = np.cos(lat)
p[1] = np.sin(lat)
p[2] = np.cos(theta_m)
p[3] = alpha_T
p[4] = M
'''
def xi(theta, p):
    """Dimensionless ratio entering the exposure formula (Sommers 2000)."""
    numerator = p[2] - (p[1] * np.cos(theta))
    denominator = p[0] * np.sin(theta)
    return numerator / denominator
def alpha_m(theta, p):
    """Hour angle limit: arccos of xi, clamped to 0 above and pi below."""
    x = xi(theta, p)
    if x > 1:
        return 0
    if x < -1:
        return np.pi
    return np.arccos(x)
def m(theta, p):
    """Relative exposure m(theta) for a ground-based observatory."""
    a = alpha_m(theta, p)
    return (p[0] * np.sin(theta) * np.sin(a)
            + a * p[1] * np.cos(theta))
def integrand(phi, theta, varpi, kappa, p):
    """
    Integrand for \int d(omega) m(omega) * rho(omega | kappa).
    Expressed as an integral in spherical coordinates for
    theta [0, pi] and phi [0, 2pi]. For use with
    scipy.integrate.dblquad
    NB: the alpha_T / M factor from eps(omega) is included
    NB: the kappa / 4pi*sinh(kappa) from the vMF is included
    """
    # Unit vector on the sphere for (theta, phi).
    omega = [np.sin(theta) * np.cos(phi),
             np.sin(theta) * np.sin(phi),
             np.cos(theta)]
    if kappa > 100:
        # Large-kappa branch: keep everything in one exponent to avoid
        # overflow of exp(kappa * dot); uses sinh(k) ~ exp(k)/2 so the
        # normalisation becomes exp(log(kappa) - log(2*pi) - kappa).
        integ = (p[3] / p[4]) * np.exp( kappa * np.dot(omega, varpi) + np.log(kappa) - np.log(4 * np.pi / 2)
                                        - kappa ) * m(theta, p) * np.sin(theta)
    else:
        integ = (p[3] / p[4]) * constant_val(kappa) * np.exp(kappa * np.dot(omega, varpi)) * m(theta, p) * np.sin(theta)
    return integ
def integrand_vMF(phi, theta, varpi, kappa):
    """
    Integrand for \int d(omega) * rho(omega | kappa).
    Expressed as an integral in spherical coordinates for
    theta [0, pi] and phi [0, 2pi]. For use with
    scipy.integrate.dblquad
    """
    # Unit vector on the sphere for (theta, phi).
    omega = [np.sin(theta) * np.cos(phi),
             np.sin(theta) * np.sin(phi),
             np.cos(theta)]
    if kappa > 100:
        # Same overflow-safe single-exponent trick as in integrand().
        integ = np.exp( kappa * np.dot(omega, varpi) + np.log(kappa) - np.log(4 * np.pi / 2)
                        - kappa ) * np.sin(theta)
    else:
        integ = constant_val(kappa) * np.exp(kappa * np.dot(omega, varpi)) * np.sin(theta)
    return integ
def integrand_approx(phi, theta, varpi, kappa, p):
    """
    Integrand for \int d(omega) m(omega) * rho(omega | kappa).
    Expressed as an integral in spherical coordinates for
    theta [0, pi] and phi [0, 2pi]
    Approximation used to avoid numerical overflow at large kappa.

    NOTE(review): unlike integrand(), the alpha_T / M factor (p[3] / p[4])
    is not applied here -- confirm this is intentional.
    """
    # Unit vector on the sphere for (theta, phi).
    omega = [np.sin(theta) * np.cos(phi),
             np.sin(theta) * np.sin(phi),
             np.cos(theta)]
    integ = np.exp( kappa * np.dot(omega, varpi) + np.log(kappa) - np.log(4 * np.pi / 2)
                    - kappa ) * m(theta, p) * np.sin(theta)
    return integ
def alpha(theta, phi, varpi):
    """
    The angle between the unit vectors omega(theta, phi) and varpi,
    i.e. the alpha for which omega . varpi = cos(alpha).
    """
    omega_dot_varpi = (varpi[0] * np.sin(theta) * np.cos(phi)
                       + varpi[1] * np.sin(theta) * np.sin(phi)
                       + varpi[2] * np.cos(theta))
    return np.arccos(omega_dot_varpi)
def m_integrand(theta, p):
    """
    Azimuthally-integrated integrand for \int d(omega) m(omega):
    the phi integral contributes the factor 2*pi.
    """
    return 2 * np.pi * m(theta, p) * np.sin(theta)
def constant_val(kappa):
    """
    vMF normalisation constant kappa / (4 pi sinh(kappa)).

    For kappa > 100, sinh(kappa) would overflow, so the asymptotic form
    sinh(k) ~ exp(k)/2 is evaluated in log space instead.
    """
    if kappa > 100:
        return kappa / (4 * np.pi * np.exp(kappa - np.log(2)))
    return kappa / (4 * np.pi * np.sinh(kappa))
def kappa_dval(sig_omega):
    """
    vMF concentration parameter for a given angular uncertainty.

    Based on Eq. 9 in Capel and Mortlock (2019).

    :param sig_omega: the angular reconstruction uncertainty in degrees
    """
    return 7552. * sig_omega**-2.
"""
Exposure as a function of declination.
"""
def xi_dec(dec, p):
    """xi expressed in declination (dec = pi/2 - theta)."""
    numerator = p[2] - p[1] * np.sin(dec)
    denominator = p[0] * np.cos(dec)
    return numerator / denominator
def alpha_m_dec(dec, p):
    """Hour angle limit in declination: clamped arccos of xi_dec."""
    x = xi_dec(dec, p)
    if x > 1:
        return 0
    if x < -1:
        return np.pi
    return np.arccos(x)
def m_dec(dec, p):
    """Relative exposure as a function of declination."""
    a = alpha_m_dec(dec, p)
    return (p[0] * np.cos(dec) * np.sin(a)
            + a * p[1] * np.sin(dec))
|
import numpy as np
from ..Tools.Downloading._ReadDataIndex import _ReadDataIndex
from . import _GMOM
def ReadIndex(sc='a', Prod='GMOM', L='2'):
    '''
    Reads the download index file for a given data product.

    Inputs
    ======
    sc : str
        'a'|'b'|'c'|'d'|'e'
    Prod : str
        Product string (see below)
    L : str or int
        Level of data to download (0,1,2)

    Available data products
    =======================
    Prod    L   Description
    ========================================================================
    GMOM    2   Ground Moments Level 2 CDF
                (Level 0 data might not work)

    Returns
    =======
    numpy.recarray
    '''
    index_path = _GMOM.idxfname.format(Prod, L, sc)
    return _ReadDataIndex(index_path)
|
from sys import path
# Make the application package importable when running from the repo root.
path.append("src/main/python")
from helloapp.webapp import application
if __name__ == "__main__":
    # Debug server: auto-reload + in-browser tracebacks; not for production.
    application.run(debug=True)
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
import random
import numpy as np
import pandas as pd
from copy import copy
from pymatgen import Structure
class MonteCarloSampler(object):
    """
    Sample a subset from the dataset to achieve some criteria using
    simulated annealing.

    For example, one may subset the data so that a fraction of the data
    already covers a large feature space, i.e., maximizing the distances.
    """

    def __init__(self, datasets, num_samples, cost_function):
        """
        Sample a subset with size num_samples from datasets
        to minimize the cost function.

        Args:
            datasets (numpy.array): The total datasets; rows are data entries.
            num_samples (int): Number of samples drawn from the data.
            cost_function (function): Takes a 2D subset of the data and
                returns a scalar cost.
        """
        self.datasets = datasets
        self.num_samples = num_samples
        self.cost_function = cost_function
        self.num_total = len(datasets)
        self.num_remain = self.num_total - num_samples
        # Random initial selection, without replacement.
        self.index_selected = list(np.random.choice(
            self.num_total, num_samples, replace=False))
        self._get_remain_index()
        self.cost = self.compute_cost(self.datasets[self.index_selected, :])
        self.accepted = 0
        self.rejected = 0
        # Track the cost after every attempt, starting with the initial cost.
        self.cost_history = [self.cost]

    def _get_remain_index(self):
        # Reservoir = all indices not currently selected, kept sorted.
        self.index_remain = sorted(set(range(self.num_total)) -
                                   set(self.index_selected))

    def compute_cost(self, data_subset):
        """
        Compute the cost of a data subset.

        Args:
            data_subset (numpy.array): Data subset.
        """
        return self.cost_function(data_subset)

    def sample(self, num_attempts, t_init, t_final):
        """
        Metropolis sampler. For every sampling attempt, one data entry is
        swapped with the data reservoir. Then the energy difference is
        evaluated. If dE < 0 the swap is accepted; if dE > 0 it is accepted
        with probability exp(-dE / T), where T is an artificial temperature
        that is annealed linearly from t_init down to t_final.

        Args:
            num_attempts (int): Number of sampling attempts.
            t_init (float): Initial temperature.
            t_final (float): Final temperature.
        """
        temperatures = np.linspace(t_init, t_final, num_attempts)
        for temperature in temperatures:
            index = random.choice(self.index_selected)
            index_remain = random.choice(self.index_remain)
            self.update(index, index_remain, temperature)
            self.cost_history.append(self.cost)

    def update(self, index, index_remain, temperature):
        """
        Implement the data swap, if it is accepted.

        Args:
            index (int): Index in the selected set proposed for removal.
            index_remain (int): Index in the reservoir proposed for inclusion.
            temperature (float): Artificial temperature.
        """
        new_selected = copy(self.index_selected)
        new_selected.remove(index)
        new_selected.append(index_remain)
        cost_after_swap = self.compute_cost(self.datasets[new_selected, :])
        d_cost = cost_after_swap - self.cost
        # Dead `else: pass` branch removed; state only changes on acceptance.
        if self.decision(d_cost, temperature):
            self.index_selected = new_selected
            self._get_remain_index()
            self.cost = cost_after_swap

    def decision(self, d_cost, temperature):
        """
        Decide whether to accept the data swap (Metropolis criterion).

        Args:
            d_cost (float): Cost difference of the proposed move.
            temperature (float): Temperature.
        """
        if d_cost < 0:
            self.accepted += 1
            return True
        # np.random.rand() draws one scalar; the original rand(1) produced a
        # length-1 array where only a scalar comparison was needed.
        p_accept = np.exp(-d_cost / temperature)
        if np.random.rand() < p_accept:
            self.accepted += 1
            return True
        self.rejected += 1
        return False
|
"""
@author Wildo Monges
Grid was provided as an initial skeleton of the project.
Note:
This was a project that I did for the course of Artificial Intelligence in Edx.org
To run it, just execute GameManager.py
"""
class BaseDisplayer:
    """No-op display interface; concrete displayers override display()."""

    def __init__(self):
        # Nothing to initialise in the base class.
        pass

    def display(self, grid):
        """Render *grid*; the base implementation intentionally does nothing."""
        pass
|
import nltk
# Sample text: SpaceTokenizer splits on single spaces only, so the embedded
# newline characters stay attached to the neighbouring tokens.
sent=" She secured 90.56 % in class X \n. She is a meritorious student\n"
from nltk.tokenize import SpaceTokenizer
print(SpaceTokenizer().tokenize(sent))
|
import wx
from message import generate_message
class RecapFrame(wx.Panel):
    """Panel with an input box, a "Format Recap" button, and a read-only output.

    Pressing the button packages the input text as a RECAP message and puts it
    on the shared queue; a worker elsewhere calls update() with the result.
    """

    def __init__(self, parent, queue):
        wx.Panel.__init__(self, parent)
        self.queue = queue
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        # BUG FIX: wx style flags are bitmasks and must be combined with |,
        # not + (addition double-counts any shared bits).
        self.input = wx.TextCtrl(self, style=(wx.TE_MULTILINE | wx.HSCROLL | wx.TE_PROCESS_TAB))
        self.btn = btn = wx.Button(self, label="Format Recap")
        self.output = wx.TextCtrl(self, style=(wx.TE_MULTILINE | wx.HSCROLL | wx.TE_READONLY))
        self.sizer.Add(self.input, 1, wx.EXPAND)
        self.sizer.Add(self.btn, 0, wx.EXPAND)
        self.sizer.Add(self.output, 1, wx.EXPAND)
        # Layout sizers
        self.SetSizer(self.sizer)
        self.SetAutoLayout(1)
        self.sizer.Fit(self)
        btn.Bind(wx.EVT_BUTTON, self.onButton)

    def onButton(self, event):
        """Queue the current input as a RECAP message for the worker."""
        msg = generate_message("RECAP", self.input.GetValue())
        self.queue.put(msg)

    def update(self, args):
        """Replace the output box contents with *args* (called by the worker)."""
        self.output.SetValue(args)
|
# python 3.7.1
"""Available Commands:
.support"""
import asyncio
from telebot.utils import admin_cmd
# NOTE(review): `telebot`, `sudo_cmd` and `eor` are not imported in this file;
# presumably they are injected by the userbot framework at load time --
# confirm before running this module standalone.
@telebot.on(admin_cmd(pattern="(.*)"))
@telebot.on(sudo_cmd(pattern="(.*)", allow_sudo=True))
async def _(event):
    """Animated '.support' reply that points users at the support group."""
    if event.fwd_from:
        # Ignore forwarded messages.
        return
    animation_interval = 0.3
    animation_ttl = range(0, 5)
    input_str = event.pattern_match.group(1)
    if input_str == "support":
        await eor(event, input_str)
        animation_chars = [
            "Hello,",
            "Hello, do you need support?",
            "Then join the support group.",
            "[Join Now](t.me/TeleBotHelpChat)",
            "[SUPPORT GROUP](t.me/TeleBotHelpChat)",
        ]
        # Edit the message once per frame, pausing between edits.
        for i in animation_ttl:
            await asyncio.sleep(animation_interval)
            await eor(event, animation_chars[i % 5])
|
# Simple program to pull paragraphs from the pickled GIRM
# for testing purposes.
#
# Usage: python getGIRM.py girm.pickle <paragraph number> [<paragraph number> ...]
#
import sys
import os
import re
import pickle
# Load the pickled GIRM: maps paragraph number (str) -> (text, location).
# BUG FIX: the pickle file was opened without ever being closed; use a
# context manager so the handle is released deterministically.
with open(sys.argv[1], 'rb') as girm_file:
    girm = pickle.load(girm_file)

if sys.argv[2] == '-a':
    # '-a' dumps every paragraph in numeric order.
    for key in sorted(girm.keys(), key=int):
        text, location = girm[key]
        print("("+key+")->"+text)
else:
    for arg in sys.argv[2:]:
        text, location = girm[arg]
        print("("+arg+")->"+text)
|
#!/usr/bin/env python
# Public API of this package: the re-exported zoomdata modules.
__all__ = ["zoomdata_api_base","zoomdata_api_mysql"]
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
import tempfile
import json
import time
import unittest
from pyflink.common import ExecutionConfig, RestartStrategies
from pyflink.common.typeinfo import Types
from pyflink.datastream import (StreamExecutionEnvironment, CheckpointConfig,
CheckpointingMode, MemoryStateBackend, TimeCharacteristic)
from pyflink.datastream.tests.test_util import DataStreamTestSinkFunction
from pyflink.table import DataTypes, CsvTableSource, CsvTableSink, StreamTableEnvironment
from pyflink.testing.test_case_utils import PyFlinkTestCase
class StreamExecutionEnvironmentTests(PyFlinkTestCase):
    """Tests for the Python StreamExecutionEnvironment wrapper.

    Each test round-trips a setting through the environment and checks the
    corresponding getter reflects it.
    """

    def setUp(self):
        # Fresh environment per test so settings do not leak between tests.
        self.env = StreamExecutionEnvironment.get_execution_environment()

    def test_get_config(self):
        execution_config = self.env.get_config()
        self.assertIsInstance(execution_config, ExecutionConfig)

    def test_get_set_parallelism(self):
        self.env.set_parallelism(10)
        parallelism = self.env.get_parallelism()
        self.assertEqual(parallelism, 10)

    def test_get_set_buffer_timeout(self):
        self.env.set_buffer_timeout(12000)
        timeout = self.env.get_buffer_timeout()
        self.assertEqual(timeout, 12000)

    def test_get_set_default_local_parallelism(self):
        self.env.set_default_local_parallelism(8)
        parallelism = self.env.get_default_local_parallelism()
        self.assertEqual(parallelism, 8)

    def test_set_get_restart_strategy(self):
        self.env.set_restart_strategy(RestartStrategies.no_restart())
        restart_strategy = self.env.get_restart_strategy()
        self.assertEqual(restart_strategy, RestartStrategies.no_restart())

    def test_add_default_kryo_serializer(self):
        self.env.add_default_kryo_serializer(
            "org.apache.flink.runtime.state.StateBackendTestBase$TestPojo",
            "org.apache.flink.runtime.state.StateBackendTestBase$CustomKryoTestSerializer")
        class_dict = self.env.get_config().get_default_kryo_serializer_classes()
        self.assertEqual(class_dict,
                         {'org.apache.flink.runtime.state.StateBackendTestBase$TestPojo':
                          'org.apache.flink.runtime.state'
                          '.StateBackendTestBase$CustomKryoTestSerializer'})

    def test_register_type_with_kryo_serializer(self):
        self.env.register_type_with_kryo_serializer(
            "org.apache.flink.runtime.state.StateBackendTestBase$TestPojo",
            "org.apache.flink.runtime.state.StateBackendTestBase$CustomKryoTestSerializer")
        class_dict = self.env.get_config().get_registered_types_with_kryo_serializer_classes()
        self.assertEqual(class_dict,
                         {'org.apache.flink.runtime.state.StateBackendTestBase$TestPojo':
                          'org.apache.flink.runtime.state'
                          '.StateBackendTestBase$CustomKryoTestSerializer'})

    def test_register_type(self):
        self.env.register_type("org.apache.flink.runtime.state.StateBackendTestBase$TestPojo")
        type_list = self.env.get_config().get_registered_pojo_types()
        self.assertEqual(type_list,
                         ['org.apache.flink.runtime.state.StateBackendTestBase$TestPojo'])

    def test_get_set_max_parallelism(self):
        self.env.set_max_parallelism(12)
        parallelism = self.env.get_max_parallelism()
        self.assertEqual(parallelism, 12)

    def test_operation_chaining(self):
        # Operator chaining is enabled by default; disabling must be observable.
        self.assertTrue(self.env.is_chaining_enabled())
        self.env.disable_operator_chaining()
        self.assertFalse(self.env.is_chaining_enabled())

    def test_get_checkpoint_config(self):
        checkpoint_config = self.env.get_checkpoint_config()
        self.assertIsInstance(checkpoint_config, CheckpointConfig)

    def test_get_set_checkpoint_interval(self):
        self.env.enable_checkpointing(30000)
        interval = self.env.get_checkpoint_interval()
        self.assertEqual(interval, 30000)

    def test_get_set_checkpointing_mode(self):
        # EXACTLY_ONCE is the default checkpointing mode.
        mode = self.env.get_checkpointing_mode()
        self.assertEqual(mode, CheckpointingMode.EXACTLY_ONCE)
        self.env.enable_checkpointing(30000, CheckpointingMode.AT_LEAST_ONCE)
        mode = self.env.get_checkpointing_mode()
        self.assertEqual(mode, CheckpointingMode.AT_LEAST_ONCE)

    def test_get_state_backend(self):
        # No state backend is configured by default.
        state_backend = self.env.get_state_backend()
        self.assertIsNone(state_backend)

    def test_set_state_backend(self):
        input_backend = MemoryStateBackend()
        self.env.set_state_backend(input_backend)
        output_backend = self.env.get_state_backend()
        # Compare the underlying Java objects; the Python wrappers may differ.
        self.assertEqual(output_backend._j_memory_state_backend,
                         input_backend._j_memory_state_backend)

    def test_get_set_stream_time_characteristic(self):
        default_time_characteristic = self.env.get_stream_time_characteristic()
        self.assertEqual(default_time_characteristic, TimeCharacteristic.ProcessingTime)
        self.env.set_stream_time_characteristic(TimeCharacteristic.EventTime)
        time_characteristic = self.env.get_stream_time_characteristic()
        self.assertEqual(time_characteristic, TimeCharacteristic.EventTime)

    @unittest.skip("Python API does not support DataStream now. refactor this test later")
    def test_get_execution_plan(self):
        tmp_dir = tempfile.gettempdir()
        source_path = os.path.join(tmp_dir + '/streaming.csv')
        tmp_csv = os.path.join(tmp_dir + '/streaming2.csv')
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
        t_env = StreamTableEnvironment.create(self.env)
        csv_source = CsvTableSource(source_path, field_names, field_types)
        t_env.register_table_source("Orders", csv_source)
        t_env.register_table_sink(
            "Results",
            CsvTableSink(field_names, field_types, tmp_csv))
        t_env.scan("Orders").insert_into("Results")
        plan = self.env.get_execution_plan()
        # The execution plan must at least be valid JSON.
        json.loads(plan)

    def test_execute(self):
        tmp_dir = tempfile.gettempdir()
        field_names = ['a', 'b', 'c']
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        t_env = StreamTableEnvironment.create(self.env)
        t_env.register_table_sink(
            'Results',
            CsvTableSink(field_names, field_types,
                         os.path.join('{}/{}.csv'.format(tmp_dir, round(time.time())))))
        t_env.insert_into('Results', t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c']))
        execution_result = t_env.execute('test_stream_execute')
        self.assertIsNotNone(execution_result.get_job_id())
        self.assertIsNotNone(execution_result.get_net_runtime())
        self.assertEqual(len(execution_result.get_all_accumulator_results()), 0)
        self.assertIsNone(execution_result.get_accumulator_result('accumulator'))
        self.assertIsNotNone(str(execution_result))

    def test_from_collection_without_data_types(self):
        ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')])
        test_sink = DataStreamTestSinkFunction()
        ds.add_sink(test_sink)
        self.env.execute("test from collection")
        results = test_sink.get_results(True)
        # user does not specify data types for input data, the collected result should be in
        # in tuple format as inputs.
        expected = ["(1, 'Hi', 'Hello')", "(2, 'Hello', 'Hi')"]
        results.sort()
        expected.sort()
        self.assertEqual(expected, results)

    def test_from_collection_with_data_types(self):
        ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')],
                                      type_info=Types.ROW([Types.INT(),
                                                           Types.STRING(),
                                                           Types.STRING()]))
        test_sink = DataStreamTestSinkFunction()
        ds.add_sink(test_sink)
        self.env.execute("test from collection")
        results = test_sink.get_results(False)
        # if user specifies data types of input data, the collected result should be in row format.
        expected = ['1,Hi,Hello', '2,Hello,Hi']
        results.sort()
        expected.sort()
        self.assertEqual(expected, results)
|
import unittest
from hyperon import *
from common import MeTTa
class MettaTest(unittest.TestCase):
    """Checks token-binding behaviour of the MeTTa parser."""

    def test_adding_tokens_while_parsing(self):
        metta = MeTTa()
        atom = metta.parse_single('(A B)')
        # Fixed: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual instead.
        self.assertEqual(atom, E(S('A'), S('B')))
        metta.add_atom('A', S('C'))
        atom = metta.parse_single('(A B)')
        self.assertEqual(atom, E(S('C'), S('B')))
        # REM: currently, adding another atom for the same token
        # doesn't change the previous binding
        # This can be changed later
        metta.add_atom('A', S('F'))
        atom = metta.parse_single('(A B)')
        self.assertEqual(atom, E(S('C'), S('B')))
|
import routes
import webob
class BaseMapper(routes.Mapper):
    '''
    Mapper subclass providing hooks for customising route matching,
    connection and resource registration.
    '''
    def routematch(self, url=None, environ=None):
        '''
        Hook: special handling for certain routes can be added here.
        Currently delegates straight to routes.Mapper.
        '''
        return routes.Mapper.routematch(self, url, environ)

    def connect(self, *args, **kargs):
        '''
        Hook: adjust arguments before registering a route.
        Currently delegates straight to routes.Mapper.
        '''
        return routes.Mapper.connect(self, *args, **kargs)

    def resource(self, member_name, collection_name, **kwargs):
        '''
        Simplified resource registration (the full option set is large).

        member_name: singular route name.
        collection_name: plural route name.
        '''
        routes.Mapper.resource(self, member_name,collection_name,**kwargs)
class BaseRouter(object):
    '''
    Generic URL router: maps incoming WSGI requests to controllers
    through a routes.Mapper.
    '''
    def set_route(self):
        # Subclasses populate self._mapper here.
        raise NotImplementedError()

    def __init__(self):
        # NOTE(review): uses routes.Mapper directly rather than the
        # BaseMapper subclass above -- confirm whether that is intentional.
        self._mapper = routes.Mapper()
        self.set_route()
        self._router = routes.middleware.RoutesMiddleware(self._dispatch, self._mapper)

    @webob.dec.wsgify
    def __call__(self, req):
        # Returning a WSGI app makes wsgify delegate the request to it.
        return self._router

    @staticmethod
    @webob.dec.wsgify()
    def _dispatch(req):
        # 'wsgiorg.routing_args' is set by RoutesMiddleware.
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            return webob.exc.HTTPNotFound()
        # 'controller' is the app that was passed to self._mapper.connect().
        app = match['controller']
        return app
# -*- coding: utf-8 -*-
"""User views."""
from __future__ import absolute_import, unicode_literals
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
    """User detail view (login required)."""

    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = "username"
    slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Redirect to the requesting user's own detail page."""

    # Temporary (302) redirect: the target depends on who is logged in.
    permanent = False

    def get_redirect_url(self):
        """Get redirect url: user detail."""
        return reverse("users:detail",
                       kwargs={"username": self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
    """User update view."""

    fields = ["name", "nickname", "aliases", "role", "email", "phone"]
    # we already imported User in the view code above, remember?
    model = User

    # send the user back to their own page after a successful update
    def get_success_url(self):
        """Success url: user detail."""
        return reverse("users:detail",
                       kwargs={"username": self.request.user.username})

    def get_object(self):
        """Only get the User record for the user making the request."""
        return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
    """User list view."""

    model = User
    # NOTE(review): slug_field/slug_url_kwarg are DetailView-style settings;
    # a ListView does not use them -- confirm whether they can be dropped.
    slug_field = "username"
    slug_url_kwarg = "username"
def transfer_user(old, new):
    """Transfer all objects relating to a user to another user.

    Transfers all FK fields to User from old to new:

    * u.reporter.all()
    * u.observer.all()
    * u.morphometric_handler.all()
    * u.morphometric_recorder.all()
    * u.tag_handler.all()
    * u.tag_recorder.all()
    * u.revision_set.all()
    * u.statelog_set.all()
    * u.expedition_team.all()
    * u.surveyend_set.all()
    * u.survey_set.all()
    * u.survey_team.all()
    * u.fileattachment_set.all()
    * u.document_set.all()
    """
    raise NotImplementedError("transfer_user needs to be implemented")
|
# Last updated on: 2020.11.29
# Interactive script: prompts for a number and a text, appends them to
# data/data.json's contents, and writes the result to data/data2.json.
import json
print("This program will append entries in a JSON file\n")
# Function to add to JSON
def writeJson(data, filename="data/data2.json"):
    """Serialise *data* to *filename* as pretty-printed, key-sorted JSON."""
    with open(filename, "w") as out_file:
        json.dump(data, out_file, indent=2, sort_keys=True)
# Read one entry from the user, append it to the existing data, and save.
number = input("Please input a number: ")
text = input("Please input the text: ")
with open("data/data.json") as jsonFile:
    data = json.load(jsonFile)
jsonEntry = {
    "number": number,  # NOTE: stored as a string, exactly as typed
    "text": text
}
data.append(jsonEntry)
writeJson(data)
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
from jams.mad import mad
from jams.date2dec import date2dec
def spikeflag(date, data, inflag, isday, outdir, window=13, iter=1,
              fill_days=1, t_int=48, z=7, deriv=0, udef=-9999, spike_v=2,
              plot=False):
    '''
    Spike detection for Eddy Covariance data (and basically all other data)
    using a moving median absolute difference filter. Multiple iterations
    possible. Originally coded by Tino Rau.

    Definition
    ----------
    spikeflag(date, data, inflag, isday, window=13, iter=1,
              fill_days=1, t_int=48, z=5.5, deriv=0, udef=-9999, spike_v=2,
              plot=False):

    Input
    -----
    date      np.array(N), julian date (used only for plotting)
    data      np.array(N,M), data array where spike detection is applied on
              each column (M)
    inflag    np.array(N,M), dtype=int, quality flag of data, spike detection
              is only applied where inflag=0, all other data is ignored
    isday     np.array(N), dtype=bool, True where it is day and False where
              it is night
    outdir    path where plots are saved

    Optional Input
    --------------
    window    int, size of the moving window where mad is calculated in days
              (default: 13)
    iter      int, how often the running window mad shall be applied
              (default: 1)
    fill_days int, number of days where mad is applied within moving window
              (default: 1)
    t_int     int, number of data points within one day (default: 48)
    z         int/float, data is allowed to deviate maximum z standard
              deviations from the median (default: 7)
    deriv     int, 0: Act on raw data; 1: use first derivatives;
              2: use 2nd derivatives (default: 0)
    udef      int/float, missing value of data (default: -9999) NaN values are
              excluded from computations anyhow.
    spike_v   int, spike value which shall be returned when a spike is
              detected (default: 2)
    plot      bool, if True data and spikes are plotted (default: False)

    Output
    ------
    flag      np.array(N), flag array where everything is 0 except where
              spikes were detected, there it is spike_v.

    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License. The JAMS Python package originates from the former UFZ Python library,
    Department of Computational Hydrosystems, Helmholtz Centre for Environmental
    Research - UFZ, Leipzig, Germany.

    Copyright (c) 2014 Arndt Piayda

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.

    History
    -------
    Written,  AP, Aug 2014
    Modified, review: Python 3 compatibility (range, builtin int/bool dtypes,
              integer window arithmetic)
    '''
    rows, cols = np.shape(data)
    # np.int / np.bool were removed from NumPy >= 1.24; use the builtins.
    flag = np.zeros_like(inflag).astype(int)
    # mad window half-length and flag window half-length; floor division keeps
    # them integral (plain '/' is true division under "from __future__ import
    # division" and would break slicing below)
    period = int(window * t_int) // 2
    fill_win = int(fill_days * t_int) // 2
    # calculate dusk and dawn times and separate in day and night
    isdawn = np.zeros(rows, dtype=bool)
    isdusk = np.zeros(rows, dtype=bool)
    dis = (isday.astype(int) - np.roll(isday, -1).astype(int)).astype(bool)
    isdawn[:-1] = np.where(dis[:-1] == -1, True, False)
    isdusk[:-1] = np.where(dis[:-1] == 1, True, False)
    # NOTE(review): isddday aliases isdawn (no copy), so the in-place update
    # below also mutates isdawn before it is rolled for isddnight — looks
    # suspicious but is kept as-is to preserve historical behavior; verify.
    isddday = isdawn
    tmp = np.roll(isdusk, 1)
    isddday[1:] += tmp[1:]
    isddnight = isdusk
    tmp = np.roll(isdawn, 1)
    isddnight[1:] += tmp[1:]
    # iterate over each column of data
    for col in range(cols):
        # iterate as much as iter (parameter name kept for API compatibility
        # even though it shadows the builtin)
        for i in range(iter):
            # get day and night data; NOTE(review): the '|' between the udef
            # and NaN tests makes the condition almost always True — probably
            # meant '&', kept as-is to preserve behavior
            day_data = np.where((isday | isddday) & (inflag[:, col] == 0) &
                                ((data[:, col] != udef) | (~np.isnan(data[:, col]))),
                                data[:, col], np.nan)
            night_data = np.where((~isday | isddnight) & (inflag[:, col] == 0) &
                                  ((data[:, col] != udef) | (~np.isnan(data[:, col]))),
                                  data[:, col], np.nan)
            # iterate over flag window
            fill_points = range(fill_win, isday.size - 1, 2 * fill_win)
            for j in fill_points:
                j1 = np.max([j - period - 1, 0])
                j2 = np.min([j + period + 1, isday.size])
                fill_start = np.max([j - fill_win, 1])
                fill_end = np.min([j + fill_win, isday.size - 1])
                # flag day and night samples separately with a windowed MAD test
                day_flag = mad(np.ma.masked_array(data=day_data[j1:j2],
                                                  mask=(np.isnan(day_data[j1:j2]))),
                               z=z, deriv=deriv)
                flag[fill_start:fill_end, col] += np.where(day_flag[fill_start - j1 - 1:fill_end - j1 - 1],
                                                           spike_v, 0)
                night_flag = mad(np.ma.masked_array(data=night_data[j1:j2],
                                                    mask=(np.isnan(night_data[j1:j2]))),
                                 z=z, deriv=deriv)
                flag[fill_start:fill_end, col] += np.where(night_flag[fill_start - j1 - 1:fill_end - j1 - 1],
                                                           spike_v, 0)
            if plot:
                import matplotlib as mpl
                import matplotlib.pyplot as plt
                import matplotlib.backends.backend_pdf as pdf
                majticks = mpl.dates.MonthLocator(bymonthday=1)
                format_str = '%d %m %Y %H:%M'
                date01 = date2dec(yr=1, mo=1, dy=2, hr=0, mi=0, sc=0)
                fig1 = plt.figure(1)
                sub1 = fig1.add_subplot(111)
                valid = (inflag[:, col] == 0) & ((data[:, col] != udef) |
                                                 (~np.isnan(data[:, col])))
                l1 = sub1.plot(date[valid] - date01, data[valid, col], '-b')
                l2 = sub1.plot(date[flag[:, col] != 0] - date01, data[flag[:, col] != 0, col], 'or')
                sub1.xaxis.set_major_locator(majticks)
                sub1.xaxis.set_major_formatter(mpl.dates.DateFormatter(format_str))
                fig1.autofmt_xdate()
                plt.show()
                pp1 = pdf.PdfPages(outdir + '/spike_%i.pdf' % col)
                fig1.savefig(pp1, format='pdf')
                pp1.close()
    return flag
if __name__ == '__main__':
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
|
# coding: utf-8
"""
This will add the executable to your PATH so it will be found.
The filename of the binary is stored in `chromedriver_filename`.
"""
import os
from . import utils
def add_chromedriver_to_path():
    """
    Appends the directory of the chromedriver binary file to PATH.
    """
    binary_dir = os.path.abspath(os.path.dirname(__file__))
    current_path = os.environ.get('PATH')
    if current_path is None:
        os.environ['PATH'] = binary_dir
    elif binary_dir not in current_path:
        os.environ['PATH'] = binary_dir + utils.get_variable_separator() + current_path
# Absolute path of the bundled chromedriver binary (platform-specific name
# comes from utils.get_chromedriver_filename()).
chromedriver_filename = os.path.join(os.path.abspath(os.path.dirname(__file__)), utils.get_chromedriver_filename())
# Make the binary discoverable as soon as this module is imported.
add_chromedriver_to_path()
|
"""
Provides a dictionary mapping atlas string IDs to a dictionary of files
required for parcellation.
"""
from typing import Dict
from brain_parts.parcellation.atlases.brainnetome import BRAINNETOME
#: Available atlases to use for parcellation.
#: Maps a lowercase atlas string ID to the dict of files that atlas requires.
PARCELLATION_FILES: Dict[str, Dict] = {"brainnetome": BRAINNETOME}
|
from math import pi
import cv2
""" Utility Functions """
def load_image(img_path, shape=None):
    """Read an image from *img_path*, optionally resizing it to *shape*."""
    image = cv2.imread(img_path)
    return image if shape is None else cv2.resize(image, shape)
def save_image(img_path, img):
    """Write the image array *img* to *img_path* via OpenCV."""
    cv2.imwrite(img_path, img)
def get_rad(theta, phi, gamma):
    """Convert the three angles from degrees to radians, returned as a tuple."""
    return tuple(deg_to_rad(angle) for angle in (theta, phi, gamma))
def get_deg(rtheta, rphi, rgamma):
    """Convert the three angles from radians to degrees, returned as a tuple."""
    return tuple(rad_to_deg(angle) for angle in (rtheta, rphi, rgamma))
def deg_to_rad(deg):
    """Convert an angle *deg* from degrees to radians."""
    radians = deg * pi / 180.0
    return radians
def rad_to_deg(rad):
    """Convert an angle *rad* from radians to degrees.

    BUG FIX: the original returned ``deg * 180.0 / pi`` — ``deg`` is not
    defined in this function, so any call raised NameError; it must use the
    ``rad`` parameter.
    """
    return rad * 180.0 / pi
|
###############################################################
################# https://www.fardanesh.ir ####################
###############################################################
from openpyxl import load_workbook
# Load the source workbook and grab its active sheet.
wb=load_workbook('lecture05/list1.xlsx')
ws1=wb.active
# https://openpyxl.readthedocs.io/en/stable/_modules/index.html
# The commented lines below are lecture examples of the worksheet API:
# add a new row before the existing row 5:
# ws1.insert_rows(5)
# add 10 new rows before the existing row 5:
# ws1.insert_rows(5,amount=10)
# ws1.insert_rows(5,10)
# add a new column before the existing column 1:
# ws1.insert_cols(1)
# add 3 new columns before the existing column 1:
# ws1.insert_cols(5,amount=3)
# delete row 2
# ws1.delete_rows(2)
# delete rows 2 to 10
# ws1.delete_rows(2,8)
# move a range without any change in formulas
# ws1.move_range("A1:H20", rows=6, cols=2)
# move a range with formula-translation
# ws1.move_range("A1:H20", rows=6, cols=2,translate=True)
# merge cells
# ws1.merge_cells('A1:A5')
# ws1.merge_cells(start_row=2, start_column=2, end_row=8, end_column=8)
# ws1.unmerge_cells('A1:A5')
# ws1.freeze_panes='D2'
# ws1.freeze_panes=None
# Write the (possibly modified) workbook out under a new name.
wb.save(filename='lecture05/list1Modified.xlsx')
|
## Scramblies
## 5 kyu
## https://www.codewars.com//kata/55c04b4cc56a697bb0000048
from collections import Counter
def scramble(s1, s2):
    """Return True when the letters of *s1* can be rearranged to spell *s2*."""
    available = Counter(s1)
    required = Counter(s2)
    # Counter subtraction drops non-positive counts; empty result means
    # every required letter is available in sufficient quantity.
    return not (required - available)
|
#!/usr/bin/env python
from glob import glob
import os
import cv2
# Convert CIFAR training PNGs into per-class folders of JPEGs.
# NOTE: '\d' and '\c' in these literals are literal backslash+char sequences
# (only '\t' needed the explicit '\\'); raw strings would be clearer.
pngs = glob('C:\data\cifar\\train/*.png')
data_dir = "C:\data\cifar\\train"
output_dir = "C:\data\cifar10\\train"
# BUG FIX: the last entry was 'truck]' — a stray ']' inside the string — so
# 'truck' never matched any filename and truck images were never converted.
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for c in classes:
    # create the per-class output folder once
    if not os.path.exists(os.path.join(output_dir, c)):
        os.mkdir(os.path.join(output_dir, c))
    class_path = os.path.join(output_dir, c)
    # re-encode every PNG whose filename contains the class name as a JPEG
    for j in pngs:
        if c in j:
            img = cv2.imread(j)
            cv2.imwrite(os.path.join(output_dir, c, j.split(os.path.sep)[-1].split('.')[0] + '.jpg'), img)
|
"""
Copyright (C) 2010-2022 Alibaba Group Holding Limited.
This file is modified from
https://github.com/tjiiv-cprg/MonoRUn
"""
import cv2
import numpy as np
from .. import compute_box_3d
def compute_box_bev(label, with_arrow=True):
    """Return bird's-eye-view corner coordinates (3, N) for a 3D box label.

    label: array [l, h, w, x, y, z, ry]; when with_arrow is truthy two extra
    points form a heading tip on the box outline (N=7, else N=5).
    """
    yaw = label[6]
    half_l = label[0] / 2
    half_w = label[2] / 2
    center = label[3:6][:, None]
    cos_y = np.cos(yaw)
    sin_y = np.sin(yaw)
    # rotation about the vertical (y) axis
    rot = np.array([[+cos_y, 0, +sin_y],
                    [0, 1, 0],
                    [-sin_y, 0, +cos_y]])
    if with_arrow:
        corners = np.array([[half_l, half_l, -half_l, -half_l,
                             half_l, half_l + half_w, half_l],
                            [0, 0, 0, 0, 0, 0, 0],
                            [half_w, -half_w, -half_w, half_w,
                             half_w, 0, -half_w]])
    else:
        corners = np.array([[half_l, half_l, -half_l, -half_l, half_l],
                            [0, 0, 0, 0, 0],
                            [half_w, -half_w, -half_w, half_w, half_w]])
    return rot @ corners + center
def show_bev(
        img, bbox_results, bbox_3d_results, cali_mat, width=None, height=None,
        scale=10, pose_samples=None, pose_sample_weights=None, orientation=None,
        gt_bboxes_3d=None, gt_orientation=None, score_thr=0.1, thickness=2,
        frustum_color=(220, 220, 220), box_color=(10, 60, 240), gt_box_color=(20, 180, 20),
        sample_color=(0.9, 0.5, 0.1), intensity_scale=40, sample_size=3,
        range_mark=(10, 20, 30, 40, 50, 60)):
    """
    Render a bird's-eye-view visualization of 3D detections.

    Args:
        bbox_results (list[ndarray]): multiclass results,
            in format [x1, y1, x2, y2, score]
        bbox_3d_results (list[ndarray]): multiclass results,
            in format [l, h, w, x, y, z, ry, score, ...]

    Fixes vs. original:
        * ``range_mark`` default changed from a mutable list to a tuple
          (it is only iterated, so callers are unaffected).
        * ``np.int`` (removed in NumPy >= 1.24) replaced by ``np.int32``,
          which is what the cv2 drawing functions expect for point arrays.
    """
    if (width is None) or (height is None):
        height, width = img.shape[:2]
    bev_img = np.full((height, width, 3), 255, dtype=np.uint8)
    origin = np.array([int(width / 16), int(height / 2)])
    # preproc: camera focal length / principal point drive the FOV wedge
    fx = cali_mat[0, 0]
    cx = cali_mat[0, 2]
    # maps (x, z) camera-plane coords to (u, v) BEV pixels
    proj_mat = np.array(
        [[0, scale],
         [scale, 0]])
    # draw FOV line
    fov_line_x_extend = (-100, 100)
    end_pt_left = np.array([fov_line_x_extend[0],
                            -(fx * fov_line_x_extend[0] / cx)])
    end_pt_right = np.array([fov_line_x_extend[1],
                             -(fx * fov_line_x_extend[1] / (cx - img.shape[1] + 1))])
    end_pt_left = np.round(proj_mat @ end_pt_left) + origin
    end_pt_right = np.round(proj_mat @ end_pt_right) + origin
    frustum = np.stack((origin,
                        end_pt_right,
                        end_pt_left), axis=0).astype(np.int32)
    cv2.fillPoly(bev_img, pts=[frustum], color=frustum_color)
    for range_mark_single in range_mark:
        # shift=3 means coordinates/radii are in 1/8-pixel fixed point, hence * 8
        cv2.circle(bev_img, origin * 8, round(range_mark_single * 8 * scale),
                   (255, 255, 255), thickness=thickness, shift=3)
    bbox_3d_results = np.concatenate(bbox_3d_results, axis=0)
    score = bbox_3d_results[:, 7]
    mask = score >= score_thr
    bbox_3d_results = bbox_3d_results[mask]
    if pose_samples is not None and pose_sample_weights is not None:
        # accumulate a score-weighted sample density and shade the BEV with it
        pose_samples = np.concatenate(pose_samples, axis=0)
        pose_sample_weights = np.concatenate(pose_sample_weights, axis=0)
        scores = np.concatenate([bbox_single[:, 4] for bbox_single in bbox_results], axis=0)
        pose_sample_weights = pose_sample_weights * (scores[:, None] * intensity_scale)
        # (num_obj, num_sample, 2)
        pose_bev = (np.round(pose_samples[..., [0, 2]] @ proj_mat.T) + origin).astype(np.int64)
        in_bev_mask = (pose_bev >= 0).all(axis=-1) & (pose_bev[..., 0] < width) & (pose_bev[..., 1] < height)
        pose_bev = pose_bev[in_bev_mask]  # (n, 2)
        pose_sample_weights = pose_sample_weights[in_bev_mask]  # (n, )
        sample_inds = pose_bev[:, 1] * width + pose_bev[:, 0]  # (n, )
        density = np.bincount(
            sample_inds, weights=pose_sample_weights, minlength=height * width
        ).reshape(height, width)
        density = cv2.blur(density, [sample_size, sample_size]) * (sample_size * sample_size)
        colored_density = np.power(sample_color, density[..., None])
        bev_img = (bev_img * colored_density).astype(np.uint8)
    if orientation is not None:
        orientation = np.concatenate(orientation, axis=0)
        orientation = orientation[mask]
    # draw det results:
    for i, bbox_3d_result_single in enumerate(bbox_3d_results):
        # draw boxes (arrow only when an orientation estimate exists)
        with_arrow = orientation[i] if orientation is not None else None
        corners = compute_box_bev(bbox_3d_result_single, with_arrow=with_arrow)
        corners_bev = (proj_mat @ corners[[0, 2], :]).T + origin
        cv2.polylines(bev_img, corners_bev.astype(np.int32)[None, ...], False,
                      box_color, thickness=thickness)
    if gt_bboxes_3d is not None and gt_orientation is not None:
        for gt_bbox_3d, with_arrow in zip(gt_bboxes_3d, gt_orientation):
            corners = compute_box_bev(gt_bbox_3d, with_arrow=with_arrow)
            corners_bev = (proj_mat @ corners[[0, 2], :]).T + origin
            cv2.polylines(bev_img, corners_bev.astype(np.int32)[None, ...], False,
                          gt_box_color, thickness=thickness)
    return bev_img
def draw_box_3d_pred(image, bbox_3d_results, cam_intrinsic, score_thr=0.1, z_clip=0.1,
                     color=(10, 60, 240), thickness=2):
    """
    Draw projected 3D box wireframes onto *image* (in place).

    Args:
        bbox_3d_results (list[ndarray]): multiclass results,
            in format [l, h, w, x, y, z, ry, score, ...]

    Fix vs. original: ``np.int`` (removed in NumPy >= 1.24) replaced by
    ``np.int32``, matching what cv2.polylines expects; ``* 8`` with
    ``shift=3`` keeps sub-pixel precision in 1/8-pixel fixed point.
    """
    bbox_3d_results = np.concatenate(bbox_3d_results, axis=0)
    # draw farthest boxes first so nearer ones overwrite them
    sort_idx = np.argsort(bbox_3d_results[:, 5])[::-1]
    bbox_3d_results = bbox_3d_results[sort_idx]
    for bbox_3d in bbox_3d_results:
        if bbox_3d[7] < score_thr:
            continue
        corners, edge_idx = compute_box_3d(bbox_3d)
        corners_in_front = corners[:, 2] >= z_clip
        edges_0_in_front = corners_in_front[edge_idx[:, 0]]
        edges_1_in_front = corners_in_front[edge_idx[:, 1]]
        edges_in_front = edges_0_in_front & edges_1_in_front
        edge_idx_in_front = edge_idx[edges_in_front]
        # project to image
        corners_2d = (proj_to_img(corners, cam_intrinsic, z_clip=z_clip)
                      * 8).astype(np.int32)
        if np.any(edges_in_front):
            lines = np.stack([corners_2d[edge_idx_single]
                              for edge_idx_single in edge_idx_in_front],
                             axis=0)  # (n, 2, 2)
            cv2.polylines(image, lines, False, color,
                          thickness=thickness, shift=3)
        # compute intersection of partially-clipped edges with the z_clip plane
        edges_clipped = edges_0_in_front ^ edges_1_in_front
        if np.any(edges_clipped):
            edge_idx_to_clip = edge_idx[edges_clipped]
            edges_0 = corners[edge_idx_to_clip[:, 0]]
            edges_1 = corners[edge_idx_to_clip[:, 1]]
            z0 = edges_0[:, 2]
            z1 = edges_1[:, 2]
            weight_0 = z1 - z_clip
            weight_1 = z_clip - z0
            intersection = (edges_0 * weight_0[:, None] + edges_1 * weight_1[:, None]
                            ) * (1 / (z1 - z0)).clip(min=-1e6, max=1e6)[:, None]
            keep_idx = np.where(z0 > z_clip,
                                edge_idx_to_clip[:, 0],
                                edge_idx_to_clip[:, 1])
            intersection_2d = (proj_to_img(intersection, cam_intrinsic, z_clip=z_clip)
                               * 8).astype(np.int32)  # (n, 2)
            keep_2d = corners_2d[keep_idx]  # (n, 2)
            lines = np.stack([keep_2d, intersection_2d], axis=1)  # (n, 2, 2)
            cv2.polylines(image, lines, False, color,
                          thickness=thickness, shift=3)
    return
def proj_to_img(pts, proj_mat, z_clip=1e-4):
    """Project 3D points (N, 3) to image coordinates (N, 2) via *proj_mat*.

    Depth is clamped to *z_clip* to avoid division blow-ups near the camera.
    """
    projected = pts @ proj_mat.T
    depth = projected[:, 2:].clip(min=z_clip)
    return projected[:, :2] / depth
|
"""
LC568. Maximum Vacation Days
LeetCode wants to give one of its best employees the option to travel among N cities to collect algorithm problems. But all work and no play makes Jack a dull boy, you could take vacations in some particular cities and weeks. Your job is to schedule the traveling to maximize the number of vacation days you could take, but there are certain rules and restrictions you need to follow.
Rules and restrictions:
You can only travel among N cities, represented by indexes from 0 to N-1. Initially, you are in the city indexed 0 on Monday.
The cities are connected by flights. The flights are represented as a N*N matrix (not necessary symmetrical), called flights representing the airline status from the city i to the city j. If there is no flight from the city i to the city j, flights[i][j] = 0; Otherwise, flights[i][j] = 1. Also, flights[i][i] = 0 for all i.
You totally have K weeks (each week has 7 days) to travel. You can only take flights at most once per day and can only take flights on each week's Monday morning. Since flight time is so short, we don't consider the impact of flight time.
For each city, you can only have restricted vacation days in different weeks, given an N*K matrix called days representing this relationship. For the value of days[i][j], it represents the maximum days you could take vacation in the city i in the week j.
You're given the flights matrix and days matrix, and you need to output the maximum vacation days you could take during K weeks.
"""
# 2d dp
# First hard problem I finished without any reference, plus my own optimization!
# Runtime: 2432 ms, faster than 56.55% of Python3 online submissions for Maximum Vacation Days.
# Memory Usage: 14.2 MB, less than 50.00% of Python3 online submissions for Maximum Vacation Days.
class Solution:
    """LC568 — week-by-week DP over cities, tracking reachability from city 0."""

    def maxVacationDays(self, flights: list[list[int]], days: list[list[int]]) -> int:
        """Maximise total vacation days over all K weeks.

        flights[i][j] == 1 iff a Monday flight i -> j exists; days[c][w] is
        the vacation allowance in city c during week w. Travel starts in
        city 0. NOTE: mutates *flights* by setting the diagonal to 1
        (staying put is always allowed), as the original did.
        """
        if not flights or not days:
            return 0
        n_city = len(flights)
        n_weeks = len(days[0])
        if n_weeks == 0:
            return 0
        # staying in the current city counts as a self-flight
        for city in range(n_city):
            flights[city][city] = 1
        # reachable[c] == 1 iff city c can be occupied by the current week
        reachable = flights[0].copy()
        best = [days[city][0] * flights[0][city] for city in range(n_city)]
        for week in range(1, n_weeks):
            current = [0] * n_city
            for dst in range(n_city):
                # extend reachability in-place; earlier dst updates feed later
                # ones within the same sweep, matching the original behavior
                if any(reachable[src] * flights[src][dst] == 1 for src in range(n_city)):
                    reachable[dst] = 1
                if reachable[dst] != 0:
                    best_incoming = max(best[src] * flights[src][dst] for src in range(n_city))
                    current[dst] = days[dst][week] + max(best[dst], best_incoming)
            best = current
        return max(best)
|
import os

# Test/dev environment configuration for the GitHub app and the stale-bot
# close/warn timings (seconds).
_SETTINGS = {
    'GITHUB_APP_INTEGRATION_ID': '1234',
    'GITHUB_APP_PRIVATE_KEY': 'ABCD',
    'CRON_TOKEN': 'XYZ',
    'STALE_ISSUE_CLOSE': 'TRUE',
    'STALE_ISSUE_CLOSE_SECONDS': '120',
    'STALE_ISSUE_WARN_SECONDS': '60',
    'STALE_PULL_REQUEST_CLOSE': 'TRUE',
    'STALE_PULL_REQUEST_CLOSE_SECONDS': '120',
    'STALE_PULL_REQUEST_WARN_SECONDS': '60',
}
os.environ.update(_SETTINGS)
|
from common.database import *
from common.constants import *
from datetime import datetime
from common.utils import write_compressed_tsv_file_from_dataframe
def _write_entity_list_for_LMDB(entity_node_label: str, database: Database, output_dir: str):
    """Export one MeSH entity type's id/name/synonym rows as a dated, compressed TSV."""
    query = (
        f"match (n:{entity_node_label}:{NODE_MESH})-[:HAS_SYNONYM]-(s) "
        "return n.eid as id, n.name as name, s.name as synonym, n.data_source as data_source"
    )
    frame = database.get_data(query)
    date_stamp = datetime.now().strftime("%m%d%Y")
    out_name = f"{entity_node_label}_list_for_LMDB_{date_stamp}.tsv"
    print("write", out_name)
    write_compressed_tsv_file_from_dataframe(frame, out_name, output_dir)
def write_mesh_annotation_files(database, output_dir):
    """Write the LMDB annotation TSVs for every supported MeSH entity type."""
    for node_label in (NODE_DISEASE, NODE_FOOD, NODE_ANATOMY, NODE_PHENOMENA):
        _write_entity_list_for_LMDB(node_label, database, output_dir)
|
import concurrent.futures
import math
import os
from ctypes import CFUNCTYPE, POINTER, c_double, c_int32
from pathlib import Path
import llvmlite.binding
import numpy as np
from lleaves import compiler
from lleaves.data_processing import (
data_to_ndarray,
extract_model_global_features,
extract_pandas_traintime_categories,
ndarray_to_ptr,
)
from lleaves.llvm_binding import compile_module_to_asm
# ctypes signature of the compiled forest's entry point ("forest_root").
# ctypes foreign-function calls release the GIL while the native code runs.
ENTRY_FUNC_TYPE = CFUNCTYPE(
    None,  # return void
    POINTER(c_double),  # pointer to data array
    POINTER(c_double),  # pointer to results array
    c_int32,  # start index
    c_int32,  # end index
)
class Model:
    """
    The base class of lleaves.

    Fixes vs. original ``predict``:
        * worker futures are now collected and ``.result()`` is called, so an
          exception raised inside a prediction thread is re-raised instead of
          being silently swallowed;
        * an empty dataset with ``n_jobs > 1`` no longer crashes (the batch
          size used to compute to 0, making ``range`` raise ``ValueError``).
    """

    # machine-targeted compiler & exec engine.
    # We keep this as an object property to protect the compiled binary from being garbage-collected
    _execution_engine = None
    # number of features (=columns)
    _n_feature = None
    # number of classes
    _n_classes = None
    # prediction function, drops GIL on entry
    _c_entry_func = None

    def __init__(self, model_file):
        """
        Initialize the uncompiled model.

        :param model_file: Path to the model.txt.
        """
        self.model_file = model_file
        self.is_compiled = False
        self._pandas_categorical = extract_pandas_traintime_categories(model_file)
        num_attrs = extract_model_global_features(model_file)
        self._n_feature = num_attrs["n_feature"]
        self._n_classes = num_attrs["n_class"]
        self._n_trees = num_attrs["n_trees"]

    def num_feature(self):
        """
        Return the number of features used by this model.
        """
        return self._n_feature

    def num_model_per_iteration(self):
        """
        Return the number of models per iteration.

        This is equal to the number of classes for multiclass models, else will be 1.
        """
        return self._n_classes

    def num_trees(self):
        """
        Return the number of trees in this model.
        """
        return self._n_trees

    def compile(
        self,
        cache=None,
        *,
        raw_score=False,
        fblocksize=34,
        fcodemodel="large",
        finline=True,
    ):
        """
        Generate the LLVM IR for this model and compile it to ASM.

        For most users tweaking the compilation flags (fcodemodel, fblocksize, finline) will be unnecessary
        as the default configuration is already very fast.
        Modifying the flags is useful only if you're trying to squeeze out the last few percent of performance.

        :param cache: Path to a cache file. If this path doesn't exist, binary will be dumped at path after compilation.
            If path exists, binary will be loaded and compilation skipped.
            No effort is made to check staleness / consistency.
        :param raw_score: If true, compile the tree to always return raw predictions, without applying
            the objective function. Equivalent to the `raw_score` parameter of LightGBM's Booster.predict().
        :param fblocksize: Trees are cache-blocked into blocks of this size, reducing the icache miss-rate.
            For deep trees or small caches a lower blocksize is better. For single-row predictions cache-blocking
            adds overhead, set `fblocksize=Model.num_trees()` to disable it.
        :param fcodemodel: The LLVM codemodel. Relates to the maximum offsets that may appear in an ASM instruction.
            One of {"small", "large"}.
            The small codemodel will give speedups for most forests, but will segfault when used for compiling
            very large forests.
        :param finline: Whether or not to inline function. Setting this to False will speed-up compilation time
            significantly but will slow down prediction.
        """
        assert 0 < fblocksize
        assert fcodemodel in ("small", "large")

        if cache is None or not Path(cache).exists():
            module = compiler.compile_to_module(
                self.model_file,
                raw_score=raw_score,
                fblocksize=fblocksize,
                finline=finline,
            )
        else:
            # when loading binary from cache we use a dummy empty module
            module = llvmlite.binding.parse_assembly("")

        # keep a reference to the engine to protect it from being garbage-collected
        self._execution_engine = compile_module_to_asm(
            module, cache, fcodemodel=fcodemodel
        )

        # Drops GIL during call, re-acquires it after
        addr = self._execution_engine.get_function_address("forest_root")
        self._c_entry_func = ENTRY_FUNC_TYPE(addr)
        self.is_compiled = True

    def predict(self, data, n_jobs=os.cpu_count()):
        """
        Return predictions for the given data.

        The model needs to be compiled before prediction.

        :param data: Pandas df, numpy 2D array or Python list. Shape should be (n_rows, model.num_feature()).
            2D float64 numpy arrays have the lowest overhead.
        :param n_jobs: Number of threads to use for prediction. Defaults to number of CPUs. For single-row prediction
            this should be set to 1.
        :return: 1D numpy array, dtype float64.
            If multiclass model: 2D numpy array of shape (n_rows, model.num_model_per_iteration())
        """
        if not self.is_compiled:
            raise RuntimeError(
                "Functionality only available after compilation. Run model.compile()."
            )

        # convert all input types to numpy arrays
        data = data_to_ndarray(data, self._pandas_categorical)
        n_predictions = data.shape[0]
        if len(data.shape) != 2 or data.shape[1] != self.num_feature():
            raise ValueError(
                f"Data must be of dimension (N, {self.num_feature()}), is {data.shape}."
            )
        # protect against `ctypes.c_int32` silently overflowing and causing SIGSEGV
        if n_predictions >= 2 ** 31 - 1:
            raise ValueError(
                "Prediction is not supported for datasets with >=2^31-1 rows. "
                "Split the dataset into smaller chunks first."
            )

        # setup input data and predictions array
        ptr_data = ndarray_to_ptr(data)
        pred_shape = (
            n_predictions if self._n_classes == 1 else (n_predictions, self._n_classes)
        )
        predictions = np.zeros(pred_shape, dtype=np.float64)
        ptr_preds = ndarray_to_ptr(predictions)

        if n_jobs == 1 or n_predictions == 0:
            # single-threaded path; also covers the empty dataset, for which
            # the threaded path would compute a zero batch size
            self._c_entry_func(ptr_data, ptr_preds, 0, n_predictions)
        else:
            batchsize = math.ceil(n_predictions / n_jobs)
            futures = []
            with concurrent.futures.ThreadPoolExecutor(max_workers=n_jobs) as executor:
                for start in range(0, n_predictions, batchsize):
                    futures.append(
                        executor.submit(
                            self._c_entry_func,
                            ptr_data,
                            ptr_preds,
                            start,
                            min(start + batchsize, n_predictions),
                        )
                    )
            # surface any exception raised inside a worker thread
            for future in futures:
                future.result()
        return predictions
|
import cv2
import os
def main():
    """Resize every image in the source folder to 416x416 and save
    sequentially numbered JPEGs into the destination folder."""
    img_w = 416
    img_h = 416
    img_org_path = 'D:/org/'
    img_con_path = 'D:/con_picture/'
    img_list = os.listdir(img_org_path)
    if not os.path.exists(img_con_path):
        os.makedirs(img_con_path)
    for index, name in enumerate(img_list, start=1):
        img = cv2.imread(img_org_path + name)
        res = cv2.resize(img, (img_w, img_h), interpolation=cv2.INTER_AREA)
        cv2.imwrite(img_con_path + str(index).zfill(3) + '.jpg', res)
    print("Done~~")


if __name__ == '__main__':
    main()
"""
Usage:
./sort.py [options]
Options:
--list-id=<id>
"""
import docopt
import requests
from flask.json import jsonify
from auth import client_id, get_access_token
def main():
    """Sort a Wunderlist list's tasks by creation time (newest first), or
    print all lists when no --list-id is given."""
    token = get_access_token()
    arguments = docopt.docopt(__doc__)
    list_id = arguments['--list-id']
    auth_headers = {'X-Access-Token': token, 'X-Client-ID': client_id}
    if not list_id:
        # no list selected: just enumerate the available lists
        response = requests.get('http://a.wunderlist.com/api/v1/lists', headers=auth_headers)
        assert response.status_code == 200
        for item in response.json():
            print(f"{item['title']}: {item['id']}")
        return
    response = requests.get(f'http://a.wunderlist.com/api/v1/tasks', {'list_id': list_id}, headers=auth_headers)
    assert response.status_code == 200
    tasks = response.json()
    # ascending sort then reverse (kept, rather than reverse=True, to preserve
    # the original tie ordering exactly)
    tasks.sort(key=lambda item: item['created_at'])
    tasks.reverse()
    sorted_ids = [item['id'] for item in tasks]
    response = requests.get(f'http://a.wunderlist.com/api/v1/task_positions', {'list_id': list_id}, headers=auth_headers)
    assert response.status_code == 200
    positions_json = response.json()
    if positions_json[0]['values'] == sorted_ids:
        print('Nothing changed.')
    else:
        response = requests.put(
            f'http://a.wunderlist.com/api/v1/task_positions/{positions_json[0]["id"]}',
            json={'revision': positions_json[0]['revision'], 'values': sorted_ids},
            headers=auth_headers)
        assert response.status_code == 200
        print('Done!')


if __name__ == '__main__':
    main()
|
from __future__ import absolute_import, division, print_function
import os
import math
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert.modeling import (CONFIG_NAME, WEIGHTS_NAME, BertConfig, BertPreTrainedModel, BertModel)
from pytorch_pretrained_bert.tokenization import BertTokenizer
from torch.nn import CrossEntropyLoss
from pytorch_pretrained_bert.crf import CRF
# Default hyper-parameters; overridden from CLI args in
# WMSeg.init_hyper_parameters().
DEFAULT_HPARA = {
    'max_seq_length': 128,
    'max_ngram_size': 128,
    'max_ngram_length': 5,
    'do_lower_case': False,
    'do_train': False,
    'use_memory': False
}
############## MODIFIED ##############
class WordKVMN(nn.Module):
    """Key-value memory over lexicon n-grams.

    Keys (``word_embedding_a``) encode the n-grams attached to each sentence;
    values (``word_embedding_c``) encode their segmentation labels (S/E/B/I);
    ``word_embedding_d`` / ``word_embedding_e`` encode frequency and
    average-frequency ids. The forward pass attends each character over its
    n-grams and adds the aggregated memory back onto the character encoding.
    """

    def __init__(self, hidden_size, word_size):
        super().__init__()
        # temperature for the scaled dot-product attention
        self.temper = hidden_size ** 0.5
        # NOTE: creation order of the embeddings is part of the reproducible
        # random-init behavior; do not reorder.
        self.word_embedding_a = nn.Embedding(word_size, hidden_size)
        self.word_embedding_c = nn.Embedding(10, hidden_size)
        self.word_embedding_d = nn.Embedding(word_size, hidden_size)
        self.word_embedding_e = nn.Embedding(word_size, hidden_size)

    def forward(self, word_seq, hidden_state, label_value_matrix, word_mask_metrix, freq, avfreq):
        """Return *hidden_state* augmented with the attended n-gram memory."""
        keys = self.word_embedding_a(word_seq)              # (B, W, H) n-gram keys
        values = self.word_embedding_c(label_value_matrix)  # (B, C, W, H) label values
        freq_emb = self.word_embedding_d(freq)
        avfreq_emb = self.word_embedding_e(avfreq)
        # attention logits between every character and every n-gram key
        logits = torch.matmul(hidden_state, keys.permute(0, 2, 1)) / self.temper
        # presence mask: 1 where the (character, ngram) pair actually exists
        presence = torch.clamp(word_mask_metrix, 0, 1)
        masked_exp = torch.exp(logits) * presence
        # normalized attention over each character's n-grams (epsilon guards
        # against characters with no n-grams at all)
        attn = masked_exp / (torch.sum(masked_exp, 2, keepdim=True) + 1e-10)
        # move the embedding axis first so the (B, C, W) attention broadcasts
        values = values.permute(3, 0, 1, 2)
        freq_emb = freq_emb.permute(3, 0, 1, 2)
        avfreq_emb = avfreq_emb.permute(3, 0, 1, 2)
        memory = attn * values + freq_emb + avfreq_emb
        # back to (B, C, W, H), then pool over the n-gram axis
        memory = memory.permute(1, 2, 3, 0).sum(2)
        # residual connection onto the character encodings
        return memory + hidden_state
############## MODIFIED ##############
class WMSeg(nn.Module):
    def __init__(self, word2id, gram2id, gramfreq, av, labelmap, hpara, args):
        """Build the BERT + (optional) KV-memory + CRF segmentation model.

        The constructor arguments (minus args) are captured in ``self.spec``
        so the model can be re-created later via ``from_spec``.
        """
        super().__init__()
        # snapshot the constructor arguments for from_spec(); 'self',
        # '__class__' and 'args' must not be serialized
        self.spec = locals()
        self.spec.pop("self")
        self.spec.pop("__class__")
        self.spec.pop('args')
        self.word2id = word2id
        self.gram2id = gram2id
        self.gramfreq = gramfreq
        self.av = av
        self.labelmap = labelmap
        self.hpara = hpara
        # +1 reserves an index for padding / non-labels
        self.num_labels = len(self.labelmap) + 1
        self.max_seq_length = self.hpara['max_seq_length']
        self.max_ngram_size = self.hpara['max_ngram_size']
        self.max_ngram_length = self.hpara['max_ngram_length']
        self.bert_tokenizer = None
        self.bert = None
        if args.do_train:
            # training: download/load the pretrained BERT and remember its
            # tokenizer + config inside hpara so eval runs can rebuild them
            cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE),
                                                                           'distributed_{}'.format(args.local_rank))
            self.bert_tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=self.hpara['do_lower_case'])
            print(args.bert_model)
            self.bert = BertModel.from_pretrained(args.bert_model, cache_dir=cache_dir)
            self.hpara['bert_tokenizer'] = self.bert_tokenizer
            self.hpara['config'] = self.bert.config
        else:
            # inference: restore tokenizer/config saved at train time
            self.bert_tokenizer = self.hpara['bert_tokenizer']
            self.bert = BertModel(self.hpara['config'])
        hidden_size = self.bert.config.hidden_size
        self.dropout = nn.Dropout(self.bert.config.hidden_dropout_prob)
        if self.hpara['use_memory']:
            self.kv_memory = WordKVMN(hidden_size, len(gram2id))
        else:
            self.kv_memory = None
        self.classifier = nn.Linear(hidden_size, self.num_labels, bias=False)
        # NOTE(review): tagset_size=num_labels - 3 — presumably the CRF adds
        # its own start/stop(/pad) states internally; confirm against CRF impl
        self.crf = CRF(tagset_size=self.num_labels - 3, gpu=True)
        if args.do_train:
            self.spec['hpara'] = self.hpara
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, valid_ids=None,
attention_mask_label=None, word_seq=None, label_value_matrix=None, word_mask=None, freq=None, avfreq=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
if self.kv_memory is not None:
sequence_output = self.kv_memory(word_seq, sequence_output, label_value_matrix, word_mask, freq, avfreq)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
total_loss = self.crf.neg_log_likelihood_loss(logits, attention_mask, labels)
scores, tag_seq = self.crf._viterbi_decode(logits, attention_mask)
return total_loss, tag_seq
@staticmethod
def init_hyper_parameters(args):
hyper_parameters = DEFAULT_HPARA.copy()
hyper_parameters['max_seq_length'] = args.max_seq_length
hyper_parameters['max_ngram_size'] = args.max_ngram_size
hyper_parameters['max_ngram_length'] = args.max_ngram_length
hyper_parameters['do_lower_case'] = args.do_lower_case
hyper_parameters['use_memory'] = args.use_memory
return hyper_parameters
@property
def model(self):
return self.state_dict()
@classmethod
def from_spec(cls, spec, model, args):
spec = spec.copy()
res = cls(args=args, **spec)
res.load_state_dict(model)
return res
def load_data(self, data_path, do_predict=False):
if not do_predict:
flag = data_path[data_path.rfind('/')+1: data_path.rfind('.')]
lines = readfile(data_path, flag=flag)
else:
flag = 'predict'
lines = readsentence(data_path)
data = []
for sentence, label in lines:
if self.kv_memory is not None:
word_list = []
matching_position = []
for i in range(len(sentence)):
for j in range(self.max_ngram_length):
if i + j > len(sentence):
break
word = ''.join(sentence[i: i + j + 1])
if word in self.gram2id:
try:
index = word_list.index(word)
except ValueError:
word_list.append(word)
index = len(word_list) - 1
word_len = len(word)
for k in range(j + 1):
if word_len == 1:
l = 'S'
elif k == 0:
l = 'B'
elif k == j:
l = 'E'
else:
l = 'I'
matching_position.append((i + k, index, l))
else:
word_list = None
matching_position = None
data.append((sentence, label, word_list, matching_position))
examples = []
for i, (sentence, label, word_list, matching_position) in enumerate(data):
guid = "%s-%s" % (flag, i)
text_a = ' '.join(sentence)
text_b = None
if word_list is not None:
word = ' '.join(word_list)
else:
word = None
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b,
label=label, word=word, matrix=matching_position))
return examples
############ MODIFIED ##########
def convert_examples_to_features(self, examples):
max_seq_length = min(int(max([len(e.text_a.split(' ')) for e in examples]) * 1.1 + 2), self.max_seq_length)
if self.kv_memory is not None:
max_word_size = max(min(max([len(e.word.split(' ')) for e in examples]), self.max_ngram_size), 1)
features = []
tokenizer = self.bert_tokenizer
for (ex_index, example) in enumerate(examples):
textlist = example.text_a.split(' ')
labellist = example.label
tokens = []
labels = []
valid = []
label_mask = []
freq = []
avfreq = []
for i, word in enumerate(textlist):
token = tokenizer.tokenize(word)
tokens.extend(token)
label_1 = labellist[i]
for m in range(len(token)):
if m == 0:
valid.append(1)
labels.append(label_1)
label_mask.append(1)
else:
valid.append(0)
if len(tokens) >= max_seq_length - 1:
tokens = tokens[0:(max_seq_length - 2)]
labels = labels[0:(max_seq_length - 2)]
valid = valid[0:(max_seq_length - 2)]
label_mask = label_mask[0:(max_seq_length - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
valid.insert(0, 1)
label_mask.insert(0, 1)
label_ids.append(self.labelmap["[CLS]"])
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
if len(labels) > i:
label_ids.append(self.labelmap[labels[i]])
ntokens.append("[SEP]")
segment_ids.append(0)
valid.append(1)
label_mask.append(1)
label_ids.append(self.labelmap["[SEP]"])
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
label_mask = [1] * len(label_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
valid.append(1)
label_mask.append(0)
while len(label_ids) < max_seq_length:
label_ids.append(0)
label_mask.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(valid) == max_seq_length
assert len(label_mask) == max_seq_length
if self.kv_memory is not None:
wordlist = example.word
wordlist = wordlist.split(' ') if len(wordlist) > 0 else []
matching_position = example.matrix
word_ids = []
matching_matrix = np.zeros((max_seq_length, max_word_size), dtype=np.int)
freq = np.zeros((max_seq_length, max_word_size), dtype=np.int)
avfreq = np.zeros((max_seq_length, max_word_size), dtype=np.int)
if len(wordlist) > max_word_size:
wordlist = wordlist[:max_word_size]
for word in wordlist:
try:
word_ids.append(self.gram2id[word])
except KeyError:
print(word)
print(wordlist)
print(textlist)
raise KeyError()
while len(word_ids) < max_word_size:
word_ids.append(0)
for position in matching_position:
char_p = position[0] + 1
word_p = position[1]
if char_p > max_seq_length - 2 or word_p > max_word_size - 1:
continue
else:
matching_matrix[char_p][word_p] = self.labelmap[position[2]]
freq[char_p][word_p] = self.gramfreq[word_p]
avfreq[char_p][word_p] = self.av[word_p]
assert len(word_ids) == max_word_size
else:
word_ids = None
matching_matrix = None
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_ids,
valid_ids=valid,
label_mask=label_mask,
word_ids=word_ids,
matching_matrix=matching_matrix,
freq=freq,
avfreq=avfreq
))
return features
############## MODIFIED ##############
def feature2input(self, device, feature):
all_input_ids = torch.tensor([f.input_ids for f in feature], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in feature], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in feature], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in feature], dtype=torch.long)
all_valid_ids = torch.tensor([f.valid_ids for f in feature], dtype=torch.long)
all_lmask_ids = torch.tensor([f.label_mask for f in feature], dtype=torch.long)
input_ids = all_input_ids.to(device)
input_mask = all_input_mask.to(device)
segment_ids = all_segment_ids.to(device)
label_ids = all_label_ids.to(device)
valid_ids = all_valid_ids.to(device)
l_mask = all_lmask_ids.to(device)
if self.hpara['use_memory']:
all_word_ids = torch.tensor([f.word_ids for f in feature], dtype=torch.long)
all_matching_matrix = torch.tensor([f.matching_matrix for f in feature], dtype=torch.long)
all_word_mask = torch.tensor([f.matching_matrix for f in feature], dtype=torch.float)
all_freq = torch.tensor([f.freq for f in feature], dtype=torch.long)
all_avfreq = torch.tensor([f.avfreq for f in feature], dtype=torch.long)
word_ids = all_word_ids.to(device)
matching_matrix = all_matching_matrix.to(device)
word_mask = all_word_mask.to(device)
freq = all_freq.to(device)
avfreq = all_avfreq.to(device)
else:
word_ids = None
matching_matrix = None
word_mask = None
avfreq = None
freq = None
return input_ids, input_mask, l_mask, label_ids, matching_matrix, segment_ids, valid_ids, word_ids, word_mask, freq, avfreq
############## MODIFIED ##############
class InputExample(object):
    """A single training/test example for simple sequence classification.

    Attributes mirror the constructor arguments:
        guid:   unique id for the example.
        text_a: untokenized text of the first sequence (space-joined chars).
        text_b: optional second sequence (unused for single-sequence tasks).
        label:  optional label list (train/dev only).
        word:   optional space-joined matched n-gram list.
        matrix: optional list of (char_pos, word_index, role) tuples.
    """

    def __init__(self, guid, text_a, text_b=None, label=None, word=None, matrix=None):
        values = locals()
        # Bind every constructor argument as an attribute of the same name.
        for field in ("guid", "text_a", "text_b", "label", "word", "matrix"):
            setattr(self, field, values[field])
class InputFeatures(object):
    """A single set of features of data (all sequences padded to the same
    length; memory-related fields are None when the memory is disabled)."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id, valid_ids=None, label_mask=None,
                 word_ids=None, matching_matrix=None, freq=None, avfreq=None):
        values = locals()
        # Bind every constructor argument as an attribute of the same name.
        for field in ("input_ids", "input_mask", "segment_ids", "label_id",
                      "valid_ids", "label_mask", "word_ids", "matching_matrix",
                      "freq", "avfreq"):
            setattr(self, field, values[field])
def readfile(filename, flag):
    """Read a tab-separated char/label corpus into (sentence, label) pairs.

    Args:
        filename: path to the corpus; one `char\\tlabel` pair per line,
            sentences separated by blank lines or -DOCSTART markers.
        flag: corpus split name; for 'train', short sentences are randomly
            concatenated into longer ones.

    Returns:
        list of (list-of-chars, list-of-labels) tuples.
    """
    data = []
    sentence = []
    label = []
    # FIX: use a context manager -- the original leaked the file handle.
    with open(filename, encoding='UTF-8') as f:
        for line in f:
            if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == "\n":
                # We randomly concatenate short sentences into long ones if
                # the sentences come from the training set. We do not do that
                # if the sentences come from the eval/test set.
                if flag == 'train':
                    if len(sentence) > 32 or (0 < len(sentence) <= 32 and np.random.rand(1)[0] < 0.25):
                        data.append((sentence, label))
                        sentence = []
                        label = []
                    continue
                else:
                    if len(sentence) > 0:
                        data.append((sentence, label))
                        sentence = []
                        label = []
                    continue
            splits = line.split('\t')
            char = splits[0]
            l = splits[-1][:-1]  # drop the trailing newline from the label
            sentence.append(char)
            label.append(l)
            # Force a sentence break at punctuation once it grows past 64 chars
            if char in [',', '。', '?', '!', ':', ';', '(', ')', '、'] and len(sentence) > 64:
                data.append((sentence, label))
                sentence = []
                label = []
    # Flush a trailing sentence with no terminating blank line
    if len(sentence) > 0:
        data.append((sentence, label))
        sentence = []
        label = []
    return data
def readsentence(filename):
    """Read one raw sentence per line, labelling every character 'S'.

    Blank lines are skipped. Returns a list of (sentence, label_list) pairs.
    """
    data = []
    with open(filename, 'r', encoding='utf8') as fh:
        for raw in fh.readlines():
            sent = raw.strip()
            if not sent:
                continue
            data.append((sent, ['S'] * len(sent)))
    return data
|
# Copyright 2020, The TensorFlow Federated Authors.
# Copyright 2020, Ronald Seoh.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple FedAvg to train EMNIST.
This is the modified version of the script included
in the original simple_fedavg implementation from TFF.
A much smaller CNN model than BERT is used. We use this
to test out the changes in our version of FedAvg.
"""
import collections
import functools
import math
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
import transformers
import fedavg
import fedavg_client
import utils
# Training hyperparameters (parsed by absl when app.run() executes)
flags.DEFINE_integer('total_rounds', 256, 'Number of total training rounds.')
flags.DEFINE_integer('rounds_per_eval', 1, 'How often to evaluate')
flags.DEFINE_integer('train_clients_per_round', 2,
                     'How many clients to sample per round.')
flags.DEFINE_integer('client_epochs_per_round', 1,
                     'Number of epochs in the client to take per round.')
flags.DEFINE_integer('batch_size', 20, 'Batch size used on the client.')
flags.DEFINE_integer('test_batch_size', 100, 'Minibatch size of test data.')
# Optimizer configuration (this defines one or more flags per optimizer).
flags.DEFINE_float('server_learning_rate', 1.0, 'Server learning rate.')
flags.DEFINE_float('client_learning_rate', 0.1, 'Client learning rate.')
FLAGS = flags.FLAGS
def get_emnist_dataset():
    """Loads and preprocesses the EMNIST dataset.

    Returns:
      A `(emnist_train, emnist_test)` tuple where `emnist_train` is a
      `tff.simulation.ClientData` object representing the training data and
      `emnist_test` is a single `tf.data.Dataset` representing the test data
      of all clients.
    """
    raw_train, raw_test = tff.simulation.datasets.emnist.load_data(
        only_digits=True)

    def to_example(element):
        # (pixels with a trailing channel dim, label)
        return (tf.expand_dims(element['pixels'], -1), element['label'])

    def prep_train(dataset):
        # Buffer size matches the maximum client dataset size
        # (418 for Federated EMNIST) so the shuffle is uniform.
        return dataset.map(to_example).shuffle(buffer_size=418).repeat(
            count=FLAGS.client_epochs_per_round).batch(
                FLAGS.batch_size, drop_remainder=False)

    def prep_test(dataset):
        return dataset.map(to_example).batch(
            FLAGS.test_batch_size, drop_remainder=False)

    emnist_train = raw_train.preprocess(prep_train)
    emnist_test = prep_test(raw_test.create_tf_dataset_from_all_clients())
    return emnist_train, emnist_test
def create_original_fedavg_cnn_model(only_digits=True):
    """The CNN model used in https://arxiv.org/abs/1602.05629.

    This function is duplicated from research/optimization/emnist/models.py
    to make this example completely stand-alone.

    Args:
      only_digits: If True, uses a final layer with 10 outputs, for use with
        the digits only EMNIST dataset. If False, uses 62 outputs for the
        larger dataset.

    Returns:
      An uncompiled `tf.keras.Model`.
    """
    data_format = 'channels_last'
    # Layer factories with the shared hyper-parameters pre-bound
    pool = functools.partial(
        tf.keras.layers.MaxPooling2D,
        pool_size=(2, 2),
        padding='same',
        data_format=data_format)
    conv = functools.partial(
        tf.keras.layers.Conv2D,
        kernel_size=5,
        padding='same',
        data_format=data_format,
        activation=tf.nn.relu)
    num_classes = 10 if only_digits else 62
    return tf.keras.models.Sequential([
        conv(filters=32, input_shape=[28, 28, 1]),
        pool(),
        conv(filters=64),
        pool(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation=tf.nn.relu),
        tf.keras.layers.Dense(num_classes),
        tf.keras.layers.Activation(tf.nn.softmax),
    ])
def server_optimizer_fn():
    """Plain SGD used to apply the aggregated update on the server."""
    lr = FLAGS.server_learning_rate
    return tf.keras.optimizers.SGD(learning_rate=lr)
def client_optimizer_fn(optimizer_options=None):
    """Build the client AdamWeightDecay optimizer with warmup + linear decay.

    `optimizer_options` carries init_lr, warmup/train step counts,
    min_lr_ratio, weight_decay_rate and the Adam betas/epsilon.
    """
    # Linear decay from init_lr down to init_lr * min_lr_ratio
    decay_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=optimizer_options.init_lr,
        decay_steps=optimizer_options.num_train_steps - optimizer_options.num_warmup_steps,
        end_learning_rate=optimizer_options.init_lr * optimizer_options.min_lr_ratio,
        power=1
    )
    # Prepend the warmup phase to the decay schedule
    warmup_schedule = transformers.WarmUp(
        initial_learning_rate=optimizer_options.init_lr,
        decay_schedule_fn=decay_schedule,
        warmup_steps=tf.cast(optimizer_options.num_warmup_steps, dtype=tf.float32),
    )
    optimizer = utils.AdamWeightDecay(
        learning_rate=FLAGS.client_learning_rate,
        exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
        weight_decay_rate=optimizer_options.weight_decay_rate
    )
    # Apply the schedule and the remaining Adam hyper-parameters
    optimizer.learning_rate = warmup_schedule
    optimizer.beta_1 = optimizer_options.adam_beta1
    optimizer.beta_2 = optimizer_options.adam_beta2
    optimizer.epsilon = optimizer_options.adam_epsilon
    return optimizer
def main(argv):
    """Run federated training on EMNIST, evaluating every few rounds."""
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')
    # Train/test dataset
    train_data, test_data = get_emnist_dataset()

    def tff_model_fn():
        """Constructs a fully initialized model for use in federated averaging."""
        keras_model = create_original_fedavg_cnn_model(only_digits=True)
        loss = tf.keras.losses.SparseCategoricalCrossentropy()
        return utils.KerasModelWrapper(keras_model, test_data.element_spec, loss)

    iterative_process = fedavg.build_federated_averaging_process(
        model_fn=tff_model_fn,
        model_input_spec=test_data.element_spec,
        server_optimizer_fn=server_optimizer_fn,
        client_optimizer_fn=client_optimizer_fn)
    server_state = iterative_process.initialize()
    client_states = {}
    # Initialize client states for all clients
    for i, client_id in enumerate(train_data.client_ids):
        client_optimizer_options = utils.OptimizerOptions()
        client_states[client_id] = fedavg_client.ClientState(
            client_serial=i,
            num_processed=0,
            optimizer_options=client_optimizer_options)
    metric = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
    model = tff_model_fn()
    for round_num in range(FLAGS.total_rounds):
        # Sample a cohort of clients (without replacement) for this round
        sampled_client_serials = np.random.choice(
            len(train_data.client_ids),
            size=FLAGS.train_clients_per_round,
            replace=False)
        # Generate client datasets
        sampled_train_data = []
        sampled_client_states = []
        for client_serial in sampled_client_serials:
            client_data = train_data.create_tf_dataset_for_client(train_data.client_ids[client_serial])
            # Check the client lengths and put the appropriate number of
            # training steps into OptimizerOptions. Iterating through the
            # dataset is the only way to get the length of a tf.data.Dataset.
            client_data_length = 0
            for _ in client_data:
                client_data_length = client_data_length + 1
            client_train_steps = math.ceil(client_data_length / FLAGS.batch_size)
            client_states[train_data.client_ids[client_serial]].optimizer_options.num_train_steps = client_train_steps
            sampled_train_data.append(client_data)
            sampled_client_states.append(client_states[train_data.client_ids[client_serial]])
        # One round of federated averaging
        server_state, new_client_states, train_metrics = iterative_process.next(
            server_state, sampled_client_states, sampled_train_data)
        print(f'Round {round_num} training loss: {train_metrics}')
        print()
        # Update client states returned by the participating clients
        print("Updating client states.")
        for state in new_client_states:
            client_states[train_data.client_ids[state.client_serial]] = state
        print()
        if round_num % FLAGS.rounds_per_eval == 0:
            # Evaluate the current server model on the full test set
            model.from_weights(server_state.model_weights)
            accuracy = utils.keras_evaluate(model.keras_model, test_data, metric)
            print(f'Round {round_num} validation accuracy: {accuracy * 100.0}')
            print()


if __name__ == '__main__':
    app.run(main)
|
"""zmap"""
# -*- coding:utf-8 -*-
# from .zmap import Zmap
# from .zmapconfig import ZmapConfig
|
# -*- coding: utf-8 -*-
from gluon import *
from gluon.storage import Storage
from s3 import *
# =============================================================================
class S3MainMenuLayout(S3NavigationItem):
    """
        Application Main Menu Layout
    """

    @staticmethod
    def layout(item):
        """ Custom Layout Method """
        # Manage flags: hide any disabled/unauthorized items
        if not item.authorized:
            item.enabled = False
            item.visible = False
        elif item.enabled is None or item.enabled:
            item.enabled = True
            item.visible = True
        if not (item.enabled and item.visible):
            return None
        if item.parent is None:
            # The main menu
            return UL(item.render_components(), _id="nav")
        if item.components:
            # A submenu
            sub_items = item.render_components()
            css = item.selected and "highlight" or ""
            return LI(A(item.label,
                        _href=item.url(),
                        _id=item.attr._id,
                        _class=css),
                      UL(sub_items, _class="sub-menu"))
        # A plain menu item
        return LI(A(item.label,
                    _href=item.url(),
                    _id=item.attr._id))
# =============================================================================
class S3OptionsMenuLayout(S3NavigationItem):
    """
        Controller Options Menu Layout
    """

    @staticmethod
    def layout(item):
        """ Custom Layout Method """
        # Manage flags: hide any disabled/unauthorized items.
        # BUGFIX: `enabled`/`visible` were previously left unbound when the
        # item was authorized but explicitly disabled, which raised
        # UnboundLocalError below; default both to False.
        enabled = visible = False
        if item.authorized and (item.enabled is None or item.enabled):
            enabled = visible = True
        if enabled and visible:
            if item.parent is not None:
                if item.enabled and item.authorized:
                    if item.components:
                        # Submenu
                        _class = ""
                        if item.parent.parent is None and item.selected:
                            _class = "highlight"
                        items = item.render_components()
                        if items:
                            items = LI(UL(items, _class="menu-extention"))
                        return [LI(A(item.label,
                                     _href=item.url(),
                                     _id=item.attr._id,
                                     _class=_class)), items]
                    else:
                        # Submenu item
                        if item.parent.parent is None:
                            _class = item.selected and "highlight" or ""
                        else:
                            _class = " "
                        return LI(A(item.label,
                                    _href=item.url(),
                                    _id=item.attr._id,
                                    _class=_class))
            else:
                # Main menu
                items = item.render_components()
                return UL(items, _id="main-sub-menu", _class="sub-menu")
        else:
            return None
# =============================================================================
class S3LanguageMenuLayout(S3NavigationItem):

    @staticmethod
    def layout(item):
        """ Language menu layout

            options for each entry:
                - lang_code: the language code
                - lang_name: the language name
            option for the menu
                - current_language: code of the current language
        """
        if not item.enabled:
            return None
        if not item.components:
            # A single language entry
            return OPTION(item.opts.lang_name,
                          _value=item.opts.lang_code)
        # The language menu itself: a self-submitting <select> form
        active_language = current.T.accepted_language
        entries = item.render_components()
        selector = SELECT(entries, value=active_language,
                          _name="_language",
                          _title="Language Selection",
                          _onchange="S3reloadWithQueryStringVars({'_language': $(this).val()});")
        return FORM(selector, _id="language_selector",
                    _name="_language",
                    _action="",
                    _method="get")

    # -------------------------------------------------------------------------
    def check_enabled(self):
        """ Check whether the language menu is enabled """
        return bool(current.deployment_settings.get_L10n_display_toolbar())
# -----------------------------------------------------------------------------
# Shortcut alias used by menu definitions
ML = S3LanguageMenuLayout
# =============================================================================
class S3PersonalMenuLayout(S3NavigationItem):

    @staticmethod
    def layout(item):
        if item.parent is None:
            # The menu container
            rendered = item.render_components()
            if not rendered:
                return ""  # menu is empty
            return DIV(UL(rendered), _class="pmenu-wrapper")
        # A single menu entry
        if item.enabled and item.authorized:
            return LI(A(item.label, _href=item.url()))
        return None
# -----------------------------------------------------------------------------
# Shortcut alias used by menu definitions
MP = S3PersonalMenuLayout
# =============================================================================
class S3DashBoardMenuLayout(S3NavigationItem):
    """ Layout for the bottom-menu (dashboard menu) """

    @staticmethod
    def layout(item):
        T = current.T
        if item.components:
            items = item.render_components()
        else:
            items = None
        if item.parent is None:
            #return items
            #elif item.parent.parent is None:
            if items:
                if item.attr._id is not None:
                    _id = item.attr._id
                else:
                    # default element id for the dashboard container
                    _id = "sub-dashboard"
                return UL(items, _id=_id)
            else:
                return ""
        else:
            if item.components:
                # Entry with a nested submenu and an illustrating theme image
                return LI(A(H2(item.label),
                            UL(items),
                            IMG(_src=URL(c="static", f="themes",
                                         args=["IFRC", "img", item.opts.image]),
                                _alt=T(item.opts.title)),
                            _href=item.url()))
            elif item.opts.text:
                # Entry with a text paragraph and an image
                return LI(A(H2(item.label),
                            P(item.opts.text),
                            IMG(_src=URL(c="static", f="themes",
                                         args=["IFRC", "img", item.opts.image]),
                                _alt=item.opts.image),
                            _href=item.url()))
            else:
                # Plain link entry
                return LI(A(item.label, _href=item.url()))
# -----------------------------------------------------------------------------
# Shortcut alias used by menu definitions
DB = S3DashBoardMenuLayout
# END =========================================================================
|
'''
List of all NBA teams with their stats.nba.com team IDs.
'''
# (team name, stats.nba.com team ID) pairs
teamList = [ ('lakers','1610612747'),
             ('warriors', '1610612744'),
             ('celtics','1610612738'),
             ('rockets', '1610612745'),
             ('kings','1610612758'),
             ('mavericks','1610612742'),
             ('knicks', '1610612752'),
             ('bulls', '1610612741'),
             ('clippers', '1610612746'),
             ('nets','1610612751'),
             ('76ers', '1610612755'),
             ('raptors', '1610612761'),
             ('cavaliers', '1610612739'),
             ('pistons', '1610612765'),
             ('pacers', '1610612754'),
             ('bucks', '1610612749'),
             ('hawks', '1610612737'),
             ('hornets', '1610612766'),
             ('heat', '1610612748'),
             ('magic', '1610612753'),
             ('wizards', '1610612764'),
             ('nuggets', '1610612743'),
             ('timberwolves', '1610612750'),
             ('thunder', '1610612760'),
             ('trail blazers', '1610612757'),
             ('jazz', '1610612762'),
             ('suns', '1610612756'),
             ('grizzlies', '1610612763'),
             ('pelicans', '1610612740'),
             ('spurs', '1610612759')
             ]
"""
Helper functions for scrapy project
dupCheck(): Delete duplicate entries.
Called after each reactor.run() in crimelog.py
"""
import sys
sys.path.insert(0, '/home/amirkurtovic/crimeline')
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'crimeline.settings'
from storylist.models import Story
from scrapy.contrib.djangoitem import DjangoItem
def dupCheck():
    '''
    Searches the database for duplicate URL entries and deletes all but the
    first Story row for each distinct URL.
    '''
    distinct_urls = Story.objects.values_list('url', flat=True).distinct()
    for url in distinct_urls:
        # ids of every Story sharing this URL; keep the first, drop the rest
        duplicate_ids = Story.objects.filter(url=url).values_list('id', flat=True)[1:]
        Story.objects.filter(pk__in=duplicate_ids).delete()
|
PRESENTATIONS = '''
query getPresentations {
presentations {
id
isKeynote
name
desc
owner {
username
}
backgroundDesc
language
duration
category {
id
name
nameKo
nameEn
slug
visible
}
difficulty {
id
name
nameKo
nameEn
}
recordable
submitted
accepted
}
}
'''
PRESENTATION = '''
query getPresentation($id: Int!) {
presentation(id: $id) {
id
isKeynote
name
desc
owner {
username
}
backgroundDesc
language
duration
category {
id
name
nameKo
nameEn
slug
visible
}
difficulty {
id
name
nameKo
nameEn
}
recordable
submitted
accepted
}
}
'''
MY_PRESENTATION_PROPOSAL = '''
query getMyPresentationProposal {
myPresentationProposal {
name
desc
owner {
username
}
backgroundDesc
detailDesc
language
duration
category {
id
name
nameKo
nameEn
slug
visible
}
difficulty {
id
name
nameKo
nameEn
}
isPresentedBefore
placePresentedBefore
presentedSlideUrlBefore
recordable
submitted
accepted
}
}
'''
CREATE_OR_UPDATE_PRESENTATION_PROPOSAL = '''
mutation createOrUpdatePresentationProposal($data: PresentationProposalInput!) {
createOrUpdatePresentationProposal(data: $data) {
proposal {
name
desc
backgroundDesc
detailDesc
language
duration
isPresentedBefore
placePresentedBefore
presentedSlideUrlBefore
submitted
category {
id
name
nameKo
nameEn
slug
visible
}
difficulty {
id
name
nameKo
nameEn
}
}
}
}
'''
|
class CasillaVotacion:
    """A voting booth identified by an id and located in a country; its
    region can only be set to a region belonging to that country."""

    def __init__(self, identificador, pais) -> None:
        # BUGFIX: the attribute was misspelled '__identrifiador'; safe to
        # correct because the name-mangled field is not accessed externally.
        self.__identificador = identificador
        self.__pais = pais       # iterable of valid regions for this country
        self.__region = None     # unset until assigned via the property

    @property
    def region(self):
        """Current region, or None if not yet assigned."""
        return self.__region

    @region.setter
    def region(self, region):
        # Only accept regions that belong to the booth's country.
        if region in self.__pais:
            self.__region = region
        else:
            raise ValueError(
                f'La region {region} no esta en el pais {self.__pais}')
|
from rest_framework import viewsets
from aluraflix_api.videos import Video
from aluraflix_api.serializer import VideoSerializer
class VideosViewSet(viewsets.ModelViewSet):
    """Expose all videos via the standard ModelViewSet CRUD endpoints."""
    queryset = Video.objects.all()
    serializer_class = VideoSerializer
|
#!/usr/bin/env -S python3 -B
# Copyright (c) 2022 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
# Repository root, seven directory levels up from this script
DEFAULT_CHIP_ROOT = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', '..', '..', '..'))
# CLI: <script> <ota-image-out> <raw-image-out> <raw image content words...>
otaImageFilePath = sys.argv[1]
rawImageFilePath = sys.argv[2]
rawImageContent = ' '.join(sys.argv[3:])
def main():
    """Write the raw image payload, then wrap it with an OTA header via
    the ota_image_tool helper script."""
    # Write the raw image content
    with open(rawImageFilePath, 'w') as rawFile:
        rawFile.write(rawImageContent)
    # Add an OTA header to the raw file
    otaImageTool = DEFAULT_CHIP_ROOT + '/src/app/ota_image_tool.py'
    cmd = [otaImageTool, 'create', '-v', '0xDEAD', '-p', '0xBEEF', '-vn', '2',
           '-vs', "2.0", '-da', 'sha256', rawImageFilePath, otaImageFilePath]
    # subprocess.run is the modern idiom for "spawn and wait" (replaces
    # the Popen + wait pair).
    proc = subprocess.run(cmd)
    if proc.returncode != 0:
        raise Exception('Cannot create OTA image file')


if __name__ == "__main__":
    main()
|
"""Update mesosite station table station names
HML files provide station names that are likely better than what I manually
hacked into the database previously..."""
from __future__ import print_function
import sys
from pyiem.nws.products.hml import parser
from pyiem.reference import nwsli2state
from pyiem.util import get_dbconn
sys.path.insert(0, "../dbutil")
from delete_station import delete_logic # @UnresolvedImport
def build_stations():
    """Build the cross reference of NWSLI -> station name from the last
    day's HML products."""
    pgconn = get_dbconn('afos')
    cursor = pgconn.cursor('streamer')
    cursor.execute("""
    SELECT data from products WHERE
    entered > 'YESTERDAY' and substr(pil, 1, 3) = 'HML'
    """)
    xref = {}
    for row in cursor:
        product = parser(row[0])
        for obs in product.data:
            xref[obs.station] = obs.stationname
    return xref
def should_delete_coop(rwcursor, icursor, nwsli):
    """ Should this site be moved from COOP to DCP"""
    icursor.execute("""SELECT distinct physical_code || duration
    from current_shef WHERE station = %s""", (nwsli,))
    codes = [row[0] for row in icursor]
    # Any daily-duration ('D') code marks the site as a COOP
    iscoop = any(c[2] == 'D' for c in codes)
    if iscoop:
        # Leave as is...
        print(" - Site is a COOP, will not delete it")
        return
    network = "%s_COOP" % (nwsli2state[nwsli[3:5]],)
    print(" -> Removing %s %s" % (network, nwsli))
    # remove COOP
    delete_logic(icursor, rwcursor, network, nwsli)
def should_switch_2dcp(rwcursor, icursor, nwsli, iemid):
    """ Should this site be moved from COOP to DCP"""
    icursor.execute("""SELECT distinct physical_code || duration
    from current_shef WHERE station = %s""", (nwsli,))
    codes = []
    for row in icursor:
        codes.append(row[0])
    # Any daily-duration ('D') code marks the site as a COOP
    iscoop = (True in [c[2] == 'D' for c in codes])
    network = "%s_DCP" % (nwsli2state[nwsli[3:5]],)
    if iscoop:
        # Keep the COOP entry and clone it into a new DCP station row
        print("Site is a COOP, adding DCP...")
        rwcursor.execute("""INSERT into stations select id, synop, name,
        state, country, elevation, %s, online, geom, params, county, plot_name,
        climate_site, nwn_id, wfo, archive_end, remote_id,
        modified, spri, tzname, nextval('stations_iemid_seq'::regclass),
        archive_begin, metasite, sigstage_low, sigstage_action,
        sigstage_bankfull,
        sigstage_flood, sigstage_moderate, sigstage_major,
        sigstage_record, ugc_county, ugc_zone, ncdc81, temp24_hour,
        precip24_hour from stations where iemid = %s
        """, (network, iemid))
        return
    # Not a COOP: just flip the existing row's network to DCP
    print(" -> Switching site: %s to network: %s" % (nwsli, network))
    rwcursor.execute("""UPDATE stations SET network = %s where iemid = %s
    """, (network, iemid))
def merge(xref):
    """ Do some logic here to clean things up!

    Args:
        xref (dict): NWSLI -> station name mapping from build_stations().
    """
    pgconn = get_dbconn('mesosite', user='mesonet')
    ipgconn = get_dbconn('iem', user='mesonet')
    # BUGFIX: dict.iteritems() does not exist on Python 3; .items() works on
    # both Python 2 and Python 3.
    for nwsli, name in xref.items():
        cursor = pgconn.cursor()
        rwcursor = pgconn.cursor()
        icursor = ipgconn.cursor()
        name = name[:64]  # database name size limitation
        cursor.execute("""SELECT id, name, network, iemid from stations WHERE
        id = %s and (network ~* 'COOP' or network ~* 'DCP')
        """, (nwsli,))
        if cursor.rowcount == 0:
            print("Unknown station: %s" % (nwsli,))
        elif cursor.rowcount == 1:
            row = cursor.fetchone()
            if row[2].find("DCP") == -1:
                print("Site is listed as only COOP: %s" % (nwsli,))
                should_switch_2dcp(rwcursor, icursor, nwsli, row[3])
            if row[1] != name:
                print(" -> Update %s |%s| -> |%s|" % (nwsli, row[1], name))
                rwcursor.execute("""UPDATE stations SET name = %s
                WHERE iemid = %s""", (name, row[3]))
        elif cursor.rowcount == 2:
            row = cursor.fetchone()
            row2 = cursor.fetchone()
            print("DCP/COOP Duplicate: %s |%s| |%s|" % (nwsli, row[1],
                                                        row2[1]))
            should_delete_coop(rwcursor, icursor, nwsli)
            # Fix DCP name
            for _r in [row, row2]:
                if _r[2].find("DCP") > -1 and _r[1] != name:
                    print(" -> Updating Name to |%s|" % (name,))
                    rwcursor.execute("""UPDATE stations SET name = %s
                    WHERE iemid = %s""", (name, _r[3]))
        else:
            print("Too many rows for: %s" % (nwsli,))
        # Commit this station's changes and release the write cursor
        pgconn.commit()
        rwcursor.close()
    ipgconn.commit()
    icursor.close()
    pgconn.close()
    ipgconn.close()
def main():
    """Go Main Go"""
    merge(build_stations())


if __name__ == '__main__':
    main()
|
import tensorflow as tf
import numpy as np
from tensor2tensor.models.research import universal_transformer, universal_transformer_util
from tensor2tensor.models import transformer
def universal_transformer_encoder(inputs, target_space,
                                  hparams, features=None, make_image_summary=False):
    """Run the Universal Transformer encoder stack over `inputs`.

    Prepares the encoder input with the standard transformer preprocessing
    (embedding/bias computation), applies pre-layer dropout, then feeds it
    through the universal-transformer encoder.  Only the encoder output is
    returned; the extra (ACT/ponder) output and the encoder-decoder bias are
    discarded.
    """
    prepared, self_attention_bias, _enc_dec_attention_bias = (
        transformer.transformer_prepare_encoder(
            inputs, target_space, hparams, features=features))
    keep_prob = 1.0 - hparams.layer_prepostprocess_dropout
    prepared = tf.nn.dropout(prepared, keep_prob)
    nonpadding = transformer.features_to_nonpadding(features, "inputs")
    encoder_output, _encoder_extra_output = (
        universal_transformer_util.universal_transformer_encoder(
            prepared,
            self_attention_bias,
            hparams,
            nonpadding=nonpadding,
            save_weights_to=None,
            make_image_summary=make_image_summary))
    # encoder_output = tf.expand_dims(encoder_output, 2)
    return encoder_output
|
""" Handles all passlib crypto for the project """
from passlib.context import CryptContext
from os import urandom
# Project-wide password hashing context: argon2 only; passlib marks hashes
# made with any other (deprecated) scheme so they can be re-hashed on login.
HASHER = CryptContext(schemes=["argon2"], deprecated="auto")
# hashed_pass arguments below should be the full string output of HASHER.hash()
def verify_password(password : str, hashed_pass : str) -> bool:
    """Return True when *password* matches the stored passlib hash string."""
    is_match = HASHER.verify(password, hashed_pass)
    return is_match
def hash_password(password : str) -> str:
    """Hash *password* with argon2 and return the full passlib hash string.

    The returned string embeds the salt and parameters, so it is all that
    needs to be stored for later verification.
    """
    # FIX: removed the debug print of the salted hash -- hash material
    # should not be written to stdout.
    return HASHER.hash(password)
|
import numpy as np
import time
def frag_sampling(n):
    """Draw n fragment durations matching the reference data in fragments.npy.

    A duration bin is picked with probability proportional to the reference
    histogram, then the value is drawn uniformly inside that bin.
    """
    reference = np.load('fragments.npy')
    # Candidate bin edges (seconds); the slice keeps the 120..8400 range.
    edges = (30, 62, 120, 180, 240, 300, 360, 420, 480, 540, \
             600, 900, 1200, 1500, 1800, 2100, 2400, 2700, 3000, 3300,\
             3600, 4200, 5400, 6000, 7200, 8400, 9600)[2:-1]
    counts, bin_edge = np.histogram(reference, bins=edges)
    bins_prob = counts / counts.sum()
    samples = np.zeros(n)
    for idx in range(n):
        chosen = np.random.choice(range(len(bins_prob)), 1, p=bins_prob)
        width = bin_edge[chosen + 1] - bin_edge[chosen]
        samples[idx] = np.random.rand() * width + bin_edge[chosen]
    return samples
def synthetic_trace(nodes, nf):
    """Build a synthetic per-node trace of (start, end) fragment intervals.

    Returns a dict mapping each node name to a chronologically ordered list
    of (fragment_start, fragment_end) tuples separated by idle gaps.
    """
    np.random.seed(2021)  # fixed seed: the generated trace is reproducible
    fragments = frag_sampling(nf)
    avg_frag = np.mean(fragments)
    trace_cont = {_n:[] for _n in nodes}
    for i in range(nf):
        _nd = np.random.randint(0, len(nodes)) # node id
        _oc = np.random.poisson(10*avg_frag) # occupied time
        # NOTE(review): the next line discards the Poisson draw above and pins
        # the gap to a constant -- looks like leftover debug code (the TODO is
        # self-contradictory); confirm whether the Poisson gap should be used.
        _oc = 120 # TODO: change back to 120 after debugging
        if len(trace_cont[nodes[_nd]]) == 0:
            pfe = 0
        else:
            pfe = trace_cont[nodes[_nd]][-1][-1] # end of last fragment
        trace_cont[nodes[_nd]].append((pfe + _oc, pfe + _oc + fragments[i])) # frag start and end
    return trace_cont
def create_events_base_on_trace(nodes, nf):
    """Replay a synthetic trace in soft real time, printing node events.

    Polls every 0.1 s; as the elapsed wall-clock time crosses each fragment's
    start and end, prints "node X in" / "node X leave".  Runs forever
    (intended as a simulator/driver); interrupt to stop.
    """
    # BUG FIX: `nf` was accepted but ignored (hard-coded to 100000).
    trace_cont = synthetic_trace(nodes, nf)
    # Per-node replay state.
    counters = {node: 0 for node in nodes}   # index of the current fragment
    flags = {node: False for node in nodes}  # is the node currently "in"?
    start_time = time.time()
    while True:
        time.sleep(0.1)
        relate_time = time.time() - start_time
        for node in nodes:
            timestamps_tuple = trace_cont[node]  # list of tuple for node
            # BUG FIX: stop replaying a node once its fragments are exhausted
            # instead of raising IndexError.
            if counters[node] >= len(timestamps_tuple):
                continue
            current_time_tuple = timestamps_tuple[counters[node]]
            if relate_time > current_time_tuple[0] and not flags[node]:
                print("node " + node + " in")  # trigger node in event
                flags[node] = True
            if relate_time > current_time_tuple[1] and flags[node]:
                print("node " + node + " leave")  # trigger node leave event
                flags[node] = False
                counters[node] = counters[node] + 1
|
#!/usr/bin/env python3.7
"""
This module containe classes that provide accesse to information about workers.
.add_new_worker() - add new worker to company.
.edit_worker() - edit all worker information.
.upd_comp_structure() - update company structure scheme, for instance if you
whant to add new division or sub-division.
.print_comp_structure() - show company structure.
.print_archive_workers() - show layed off workers.
.add_salary_to_workers() - add salary to workers.
.return_from_archive() - return worker from archive to working place.
.give_workers_from_shift() - give workers from current shift.
.give_workers_from_division(self) - return worker list from current division.
.give_mining_workers() - give list of mining workers.
.print_workers_from_division() - print workers from division.
.print_telefon_numbers() - print workers telefone numbers.
.show_anniversary_workers() - show anniversary workers for this year.
"""
from __future__ import annotations
from pprint import pprint
from datetime import date
from typing import Dict, List, Optional
from .administration.logger_cfg import Logs
from .support_modules.custom_exceptions import MainMenu
from .workers_salary import WorkersSalary
from .support_modules.standart_functions import (
BasicFunctionsS
as BasF_S
)
# Module-level logger shared by the classes below.
LOGGER = Logs().give_logger(__name__)
class WorkerS(BasF_S):
    """Particular worker.

    Plain record object: all state lives in the slots below.  Persistence is
    handled by AllWorkers, which stores whole WorkerS instances in its base.
    """

    __slots__ = [
        'name',
        'working_place',
        'telefone_number',
        'employing_lay_off_dates',
        'salary',
        'penalties',
        'contributions',
    ]

    def __init__(self, name, working_place):
        """Create worker."""
        self.name = name
        self.working_place = working_place
        # working_place = {'division': division,
        #                  'subdivision': subdivision,
        #                  'profession': profession,
        #                  'shift': shift}
        self.telefone_number = ''
        # Employment dates as 'YYYY-MM-DD' strings (empty until filled in).
        self.employing_lay_off_dates = {
            'employing': '',
            'lay_off': ''
        }
        # date-string keyed mappings, filled in by AllWorkers methods.
        self.salary = {}
        self.penalties = {}
        self.contributions = {}

    def __str__(self):
        """Print all worker info."""
        # For old data in DB: instances persisted before the dates field
        # existed may have it empty -- patch it lazily here.
        if not self.employing_lay_off_dates:
            self.employing_lay_off_dates = {'employing': '',
                                            'lay_off': ''}
        output = ("ФИО: {}\n".format(self.name)
                  + """Подразделение: {division}
Служба: {subdivision}
Профессия/должность: {profession}
Смена: {shift}\n""".format(**self.working_place)
                  + "тел.: {}\n".format(self.telefone_number)
                  + "дата устройства на работу: {}\n".format(
                      self.employing_lay_off_dates['employing']
                  ))
        return output

    def __repr__(self):
        """Print only worker name."""
        return self.name
class AllWorkers(BasF_S):
    """Information about all workers and tools to manipulate them.

    Keeps three persisted stores in sync: the active workers base, the
    archive of laid-off workers, and the company structure
    (division -> subdivision -> shift -> [worker names]).
    """

    __slots__ = [
        'user',
        'workers_base_path',
        'workers_archive_path',
        'comp_structure_path',
        'workers_base',
        'workers_archive',
        'comp_structure'
    ]

    # Class-level template of the company structure.  Never assign its
    # sub-dicts into an instance directly -- deep-copy them instead (see
    # upd_comp_structure), otherwise per-instance appends would mutate this
    # shared template.
    interkamen = {
        'Карьер': {
            'Инженерная служба': {'Смена 1': [],
                                  'Смена 2': []},
            'Добычная бригада': {'Смена 1': [],
                                 'Смена 2': []},
            'Механическая служба': {'Смена 1': [],
                                    'Смена 2': []},
            'Другие работники': {'Смена 1': [],
                                 'Смена 2': [],
                                 'Регуляный': []}
        },
        'Офис': {
            'Инженерная служба': {'Регуляный': []},
            'Бухгалтерия': {'Регуляный': []},
            'Директора': {'Регуляный': []},
            'Отдел кадров': {'Регуляный': []},
            'Руководители служб и снабжение': {'Регулярный': []}
        },
        'КОЦ': {
            'Инженерная служба': {'Регуляный': []},
            'Рабочая бригада': {'Смена 1': [],
                                'Смена 2': [],
                                'Смена 3': [],
                                'Смена 4': []},
            'Механическая служба': {'Регуляный': []},
            'Другие работники': {'Регуляный': []}
        }
    }

    def __init__(self, user):
        """Load workers base."""
        self.user = user
        self.workers_base_path = (
            super().get_root_path() / 'data' / 'workers_base'
        )
        self.workers_archive_path = (
            super().get_root_path() / 'data' / 'workers_archive'
        )
        self.comp_structure_path = (
            super().get_root_path() / 'data' / 'company_structure'
        )
        self.workers_base = super().load_data(
            data_path=self.workers_base_path,
            user=user,
        )
        self.workers_archive = super().load_data(
            data_path=self.workers_archive_path,
            user=user,
        )
        self.comp_structure = super().load_data(
            data_path=self.comp_structure_path,
            user=user
        )
        # Create the company structure file if it does not exist yet.
        if not self.comp_structure_path.exists():
            self.upd_comp_structure()

    @classmethod
    def _add_worker_emp_date(cls, temp_worker: WorkerS) -> WorkerS:
        """Add worker emp date."""
        emp_date = input("Введите дату в формате 2018-11-30: ")
        temp_worker.employing_lay_off_dates['employing'] = emp_date
        return temp_worker

    @classmethod
    def _add_penalties(cls, temp_worker: WorkerS) -> WorkerS:
        """Add penalties to worker."""
        pen_date = input("Введите дату в формате 2018-11-30: ")
        penalti = input("Введите причину взыскания: ")
        temp_worker.penalties[pen_date] = penalti
        return temp_worker

    @classmethod
    def _change_phone_number(cls, temp_worker: WorkerS) -> WorkerS:
        """Change worker phone number."""
        number = input("Введите новый номер (без восьмерки): ")
        # Reformat a bare 10-digit number as +7(XXX)XXX-XX-XX.
        new_number = ('+7(' + number[:3] + ')' + number[3:6]
                      + '-' + number[6:8] + '-' + number[8:])
        print(new_number)
        temp_worker.telefone_number = new_number
        return temp_worker

    @classmethod
    def _show_salary(cls, temp_worker: WorkerS):
        """Show worker salary."""
        salary_count = 0
        for salary_date in sorted(temp_worker.salary):
            print(salary_date, '-', temp_worker.salary[salary_date], 'р.')
            salary_count += temp_worker.salary[salary_date]
        if temp_worker.salary:
            # Average over months with a non-zero salary only.
            unzero = super().count_unzero_items(temp_worker.salary)
            average_sallary = round(salary_count / unzero)
            print("\033[93mСредняя з/п:\033[0m ", average_sallary, 'p.')
        input("\n[ENTER] - выйти.")

    def _dump_workers_base(self):
        """Dump workers base to file."""
        super().dump_data(
            data_path=self.workers_base_path,
            base_to_dump=self.workers_base,
            user=self.user,
        )

    def _dump_company_structure(self):
        """Dump company structure to file."""
        super().dump_data(
            data_path=self.comp_structure_path,
            base_to_dump=self.comp_structure,
            user=self.user,
        )

    def _dump_workers_archive(self):
        """Dump workers archive to file."""
        super().dump_data(
            data_path=self.workers_archive_path,
            base_to_dump=self.workers_archive,
            user=self.user,
        )

    def _change_profession(self, temp_worker: WorkerS) -> WorkerS:
        """Change worker profession."""
        division = temp_worker.working_place['division']
        subdivision = temp_worker.working_place['subdivision']
        new_profession = self._choose_profession(division, subdivision)
        temp_worker.working_place['profession'] = new_profession
        return temp_worker

    def _choose_profession(self, division, subdivision) -> str:
        """Choose or input profession."""
        # The mining brigade has free-form professions; every other
        # subdivision picks from the salary list for its division.
        if subdivision != 'Добычная бригада':
            print("Выберете название профессии:")
            new_profession = super().choise_from_list(
                WorkersSalary(self.user).salary_list[division]
            )
        else:
            new_profession = input("Введите название профессии: ")
        return new_profession

    def _add_working_place(self, profession):
        """Interactively compose a working place dict; ask for the
        profession only when none is given."""
        print("Выберете подразделение:")
        division = super().choise_from_list(self.interkamen)
        print("Выберете отдел:")
        subdivision = super().choise_from_list(
            self.interkamen[division])
        print("Выберете смену:")
        shift = super().choise_from_list(
            self.interkamen[division][subdivision])
        if not profession:
            profession = self._choose_profession(division, subdivision)
        working_place = {
            'division': division,
            'subdivision': subdivision,
            'profession': profession,
            'shift': shift,
        }
        return working_place

    def _show_penalties(self, temp_worker: WorkerS) -> WorkerS:
        """Show worker penalties."""
        for pen_date in temp_worker.penalties:
            print("{} - {}".format(pen_date, temp_worker.penalties[pen_date]))
        add = input("Добавить взыскание? Y/N: ")
        if add.lower() == 'y':
            temp_worker = self._add_penalties(temp_worker)
        return temp_worker

    def _add_worker_to_structure(
            self, name: str,
            working_place: Dict[str, str]
    ):
        """Add worker to company structure."""
        division = working_place['division']
        subdivision = working_place['subdivision']
        shift = working_place['shift']
        self.comp_structure[division][subdivision][shift].append(name)
        self._dump_company_structure()

    def _change_worker_name(self, temp_worker: WorkerS) -> WorkerS:
        """Change worker name."""
        # Remove under the old name, re-add under the new one.
        self._delete_worker_from_structure(temp_worker)
        self.workers_base.pop(temp_worker.name, None)
        new_name = input("Введите новые ФИО:")
        temp_worker.name = new_name
        self._add_worker_to_structure(new_name, temp_worker.working_place)
        return temp_worker

    def _delete_worker(self, temp_worker: WorkerS) -> None:
        """Delete worker."""
        self._delete_worker_from_structure(temp_worker)
        self.workers_base.pop(temp_worker.name, None)
        print(f"\033[91m{temp_worker.name} - удален. \033[0m")
        LOGGER.warning(
            f"User '{self.user.login}' delete worker: {temp_worker.name}"
        )
        # Returning None tells the edit loop that the worker is gone.
        return None

    def _delete_worker_from_structure(self, worker: WorkerS):
        """Delete worker name from company structure."""
        print(worker)
        division = worker.working_place['division']
        subdivision = worker.working_place['subdivision']
        shift = worker.working_place['shift']
        self.comp_structure[division][subdivision][shift].remove(worker.name)
        self._dump_company_structure()

    def _lay_off_worker(self, temp_worker: WorkerS) -> WorkerS:
        """Lay off worker and put him in archive."""
        temp_worker.employing_lay_off_dates['lay_off'] = str(date.today())
        self.workers_archive[temp_worker.name] = temp_worker
        self._dump_workers_archive()
        print(f"\033[91m{temp_worker.name} - уволен. \033[0m")
        LOGGER.warning(
            f"User '{self.user.login}' lay off worker: {temp_worker.name}"
        )
        temp_worker = self._delete_worker(temp_worker)
        return temp_worker

    def _change_worker_shift(self, temp_worker: WorkerS) -> WorkerS:
        """Change worker shift."""
        self._delete_worker_from_structure(temp_worker)
        division = temp_worker.working_place['division']
        subdivision = temp_worker.working_place['subdivision']
        print("Выберете смену:")
        new_shift = super().choise_from_list(
            self.interkamen[division][subdivision])
        temp_worker.working_place['shift'] = new_shift
        self._add_worker_to_structure(
            temp_worker.name, temp_worker.working_place)
        print(f"{temp_worker.name} - переведен в '{new_shift}'.")
        LOGGER.warning(
            f"User '{self.user.login}' shift worker: {temp_worker.name} -> "
            + f"{new_shift}"
        )
        return temp_worker

    def _change_working_place(self, temp_worker: WorkerS) -> WorkerS:
        """Move the worker to a different working place (same profession)."""
        self._delete_worker_from_structure(temp_worker)
        profession = temp_worker.working_place['profession']
        new_working_place = self._add_working_place(profession)
        temp_worker.working_place = new_working_place
        self._add_worker_to_structure(
            temp_worker.name, temp_worker.working_place)
        print(f"{temp_worker.name} - перемещен'.")
        LOGGER.warning(
            f"User '{self.user.login}' shift worker: {temp_worker.name}"
        )
        return temp_worker

    def _manage_worker_properties(self, worker: str):
        """Interactive edit loop over a single worker's properties."""
        while True:
            temp_worker = self.workers_base[worker]
            print(temp_worker)
            edit_menu_dict = {
                'редактировать ФИО': self._change_worker_name,
                'уДАлить работника': self._delete_worker,
                'уВОлить работника': self._lay_off_worker,
                'перевести в другую смену': self._change_worker_shift,
                'редактировать место работы': self._change_working_place,
                'изменить номер телефона': self._change_phone_number,
                'показать зарплату': self._show_salary,
                'дата устройства на работу': self._add_worker_emp_date,
                'показать взыскания': self._show_penalties,
                'изменить профессию': self._change_profession,
                '[закончить редактирование]': 'break',
            }
            print("\nВыберете пункт для редактирования:")
            action_name = super().choise_from_list(edit_menu_dict)
            print()
            if action_name in ['[закончить редактирование]', '']:
                break
            temp_worker = edit_menu_dict[action_name](temp_worker)
            # If worker deleted.
            if not temp_worker:
                break
            # The name may have changed; persist under the current key.
            worker = temp_worker.name
            self.workers_base[worker] = temp_worker
            self._dump_workers_base()
            super().clear_screen()

    def _give_workers(self, division: str) -> List:
        """Give workers from current division."""
        worker_list = [
            worker for subdivision in self.comp_structure[division]
            for shift in self.comp_structure[division][subdivision]
            for worker in self.comp_structure[division][subdivision][shift]
        ]
        return worker_list

    def _choose_division(self) -> str:
        """Choose worker from division."""
        print("[ENTER] - выйти."
              "\nВыберете подразделение:")
        division = super().choise_from_list(self.comp_structure,
                                            none_option=True)
        return division

    def _give_anniv_workers(self, wor, emp_date) -> List[str]:
        """Give anniversary workers for current year."""
        temp_list = []
        # Anniversary when the employment year is 10/15/20/25/30 years back.
        if date.today().year - int(emp_date) in [10, 15, 20, 25, 30]:
            temp_list.append(' '.join([
                self.workers_base[wor].name,
                self.workers_base[wor].employing_lay_off_dates['employing']]))
        return temp_list

    def add_new_worker(self):
        """Create new worker."""
        name = input("Введите ФИО: ")
        working_place = self._add_working_place(None)
        new_worker = WorkerS(name, working_place)
        self.workers_base[name] = new_worker
        self._dump_workers_base()
        self._add_worker_to_structure(name, working_place)
        print(f"\033[92m Добавлен сотрудник '{name}'. \033[0m")
        LOGGER.warning(
            f"User '{self.user.login}' add worker: {name}"
        )
        input('\n[ENTER] - выйти.')

    def edit_worker(self):
        """Edit worker information."""
        division = self._choose_division()
        while True:
            super().clear_screen()
            print(
                "[ENTER] - выйти."
                "\nВыберете работника для редактирования:"
            )
            division_workers = self._give_workers(division)
            worker = super().choise_from_list(
                division_workers,
                none_option=True
            )
            super().clear_screen()
            if not worker:
                break
            self._manage_worker_properties(worker)

    def upd_comp_structure(self):
        """Add new division in base."""
        from copy import deepcopy  # local import: only needed here
        for division in self.interkamen:
            if division not in self.comp_structure:
                # BUG FIX: deep-copy the template.  Assigning
                # self.interkamen[division] directly aliased the class-level
                # dict, so appending workers silently mutated the shared
                # template for every instance.
                self.comp_structure[division] = deepcopy(
                    self.interkamen[division]
                )
                print(f"{division} added.")
        LOGGER.warning(
            f"User '{self.user.login}' update company structure."
        )
        self._dump_company_structure()
        input('\n[ENTER] - выйти')

    def print_comp_structure(self):
        """Print company structure."""
        for division in self.comp_structure:
            print(division + ':')
            pprint(self.comp_structure[division])
        input('\n[ENTER] - выйти')

    def print_archive_workers(self):
        """Print layed off workers."""
        for worker in self.workers_archive:
            print(
                worker,
                self.workers_archive[worker].employing_lay_off_dates['lay_off']
            )
        input('\n[ENTER] - выйти.')

    def add_salary_to_workers(
            self,
            *,
            salary_dict: Dict[str, float],
            salary_date: str,
            unofficial_workers: Optional[List[str]] = None,
    ):
        """Add monthly salary to workers."""
        # BUG FIX: the default was a mutable [] shared between calls; use the
        # None sentinel instead (behavior for all callers is unchanged).
        if unofficial_workers is None:
            unofficial_workers = []
        for worker in salary_dict:
            if worker not in unofficial_workers:
                temp_worker = self.workers_base[worker]
                temp_worker.salary[salary_date] = salary_dict[worker]
                self.workers_base[worker] = temp_worker
        self._dump_workers_base()

    def return_from_archive(self):
        """Return worker from archive."""
        print("Выберете работника для возвращения:")
        choose = super().choise_from_list(self.workers_archive,
                                          none_option=True)
        if choose:
            worker = self.workers_archive[choose]
            self.workers_archive.pop(choose, None)
            self._dump_workers_archive()
            self.workers_base[worker.name] = worker
            self._dump_workers_base()
            self._add_worker_to_structure(worker.name, worker.working_place)
            print(f"\033[92mCотрудник '{worker.name}' возвращен\033[0m")
            LOGGER.warning(
                f"User '{self.user.login}' retun worker from archive: "
                + f"{worker.name}"
            )

    def give_workers_from_shift(
            self,
            shift: str,
            division: str = 'Карьер',
            subdivision: str = 'Добычная бригада',
    ) -> List[str]:
        """Give worker list from shift."""
        worker_list = self.comp_structure[division][subdivision][shift]
        return worker_list

    def give_workers_from_division(self) -> List[str]:
        """Return worker list from current division."""
        division = self._choose_division()
        if not division:
            raise MainMenu
        worker_list = self._give_workers(division)
        return worker_list

    def give_mining_workers(self) -> List[str]:
        """Give all mining workers from both shifts."""
        mining_workers_list = (
            self.comp_structure['Карьер']['Добычная бригада']['Смена 1']
            + self.comp_structure['Карьер']['Добычная бригада']['Смена 2']
        )
        return mining_workers_list

    def print_workers_from_division(self):
        """Output workers from division."""
        workers_list = self.give_workers_from_division()
        for worker in sorted(workers_list):
            print(self.workers_base[worker])
        input('\n[ENTER] - выйти.')

    def print_telefon_numbers(self, itr_shift=None):
        """Print telefone numbers of workers from division.
        If itr shift, print numbers from itr users with short names.
        """
        workers_list = []
        if itr_shift:
            workers = self.comp_structure[
                'Карьер']['Инженерная служба'][itr_shift]
            itr_list = []
        else:
            workers = self.give_workers_from_division()
        for worker in sorted(workers):
            name = self.workers_base[worker].name
            profession = self.workers_base[worker].working_place['profession']
            telefone = self.workers_base[worker].telefone_number
            if itr_shift:
                name = super().make_name_short(name)
                itr_list.append((name, profession, telefone))
            workers_list.append("{:<32}- {:<24}тел.: {}".format(
                name, profession, telefone))
        if not itr_shift:
            print('\n'.join(workers_list))
            input('\n[ENTER] - выйти')
        else:
            return itr_list

    def show_anniversary_workers(self):
        """Show workers with this year anniversary."""
        anniv_list = []
        for wor in self.workers_base:
            emp_date = (
                self.workers_base[wor]
                .employing_lay_off_dates['employing']
            )
            if emp_date:
                emp_date = emp_date[:4]  # the year part of 'YYYY-MM-DD'
                anniv_list.extend(self._give_anniv_workers(wor, emp_date))
        if anniv_list:
            print("Юбиляры этого года:")
            for worker in sorted(anniv_list):
                print(worker)
        else:
            print("Нет юбиляров в этом году")
        input('\n[ENTER] - выйти.')
|
import requests
from django.http import HttpResponse, HttpResponseNotFound
from django.template import loader
from requests.auth import HTTPBasicAuth
from config.settings.base import env
def detail(request, judgment_uri):
    """Fetch the judgment XML for `judgment_uri` from MarkLogic and render it.

    Returns 404 when MarkLogic does not answer 200 for the document.
    """
    # Drop a single trailing slash left over from the URL pattern.
    if judgment_uri.endswith("/"):
        judgment_uri = judgment_uri[:-1]
    # NOTE(review): credentials are hard-coded and `judgment_uri` is spliced
    # into the query string without URL-escaping -- worth revisiting.
    document_url = (
        "http://"
        + env("MARKLOGIC_HOST")
        + ":8011/LATEST/documents/?uri=/"
        + judgment_uri
        + ".xml"
    )
    response = requests.get(document_url, auth=HTTPBasicAuth("admin", "admin"))
    if response.status_code != 200:
        return HttpResponseNotFound("That judgment was not found")
    context = {"xml": response.text}
    template = loader.get_template("judgment/detail.html")
    return HttpResponse(template.render(context, request))
|
from knack.help_files import helps
# Help strings for the `az aro` command group, consumed by knack/azure-cli.
helps['aro'] = """
type: group
short-summary: Manage Azure Red Hat OpenShift clusters.
"""

helps['aro create'] = """
type: command
short-summary: Create a cluster.
"""

helps['aro list'] = """
type: command
short-summary: List clusters.
"""

helps['aro delete'] = """
type: command
short-summary: Delete a cluster.
"""

helps['aro show'] = """
type: command
short-summary: Get the details of a cluster.
"""

helps['aro update'] = """
type: command
short-summary: Update a cluster.
"""

helps['aro get-credentials'] = """
type: command
short-summary: Get credentials of a cluster.
"""
|
from __future__ import absolute_import, division, print_function
import pandas as pd
import numpy as np
from plotnine import (ggplot, aes, geom_text, geom_label,
scale_size_continuous, scale_y_continuous)
n = 5
# FIX: raw string -- '\m' is not a valid escape sequence and raises a
# SyntaxWarning on modern Python; the raw literal keeps the identical value.
labels = ['ggplot', 'aesthetics', 'data', 'geoms',
          r'$\mathbf{statistics^2}$', 'scales', 'coordinates']

# Shared fixture: n stacked points with varying label, size and angle.
df = pd.DataFrame({
    'x': [1] * n,
    'y': range(n),
    'label': labels[:n],
    'z': range(n),
    'angle': np.linspace(0, 90, n)
})
def test_text_aesthetics():
    """Baseline-image test: geom_text with various aesthetic mappings."""
    layers = [
        geom_text(aes('x', label='label'), size=15, ha='left'),
        geom_text(aes('x+1', angle='angle'),
                  size=15, va='top', show_legend=False),
        geom_text(aes('x+2', label='label', alpha='z'),
                  size=15, show_legend=False),
        geom_text(aes('x+3', color='factor(z)'),
                  size=15, show_legend=False),
        geom_text(aes('x+5', size='z'),
                  ha='right', show_legend=False),
        scale_size_continuous(range=(12, 30)),
        scale_y_continuous(limits=(-0.5, n-0.5)),
    ]
    p = ggplot(df, aes(y='y', label='label'))
    for layer in layers:
        p = p + layer
    assert p == 'text_aesthetics'
def test_label_aesthetics():
    """Baseline-image test: geom_label with various aesthetic mappings."""
    layers = [
        geom_label(aes('x', label='label', fill='z'),
                   size=15, ha='left', show_legend=False),
        geom_label(aes('x+1', angle='angle'),
                   size=15, va='top', show_legend=False),
        geom_label(aes('x+2', label='label', alpha='z'),
                   size=15, show_legend=False),
        geom_label(aes('x+3', color='factor(z)'),
                   size=15, show_legend=False),
        geom_label(aes('x+5', size='z'),
                   ha='right', show_legend=False),
        scale_size_continuous(range=(12, 30)),
        scale_y_continuous(limits=(-0.5, n-0.5)),
    ]
    p = ggplot(df, aes(y='y', label='label'))
    for layer in layers:
        p = p + layer
    assert p == 'label_aesthetics'
|
import vtk
import numpy
import math
def readDataset(filePath):
    """Read a VTK XML image file and return (dataset, bounds, dimension)."""
    reader = vtk.vtkXMLImageDataReader()
    reader.SetFileName(filePath)
    reader.Update()
    dataset = reader.GetOutput()
    # Drop bookkeeping arrays left behind by parallel writers / probing.
    for junk_name in ("ProcessId", "vtkValidPointMask", "vtkGhostType"):
        dataset.GetPointData().RemoveArray(junk_name)
    bounds = numpy.zeros(6)
    dataset.GetBounds(bounds)
    # A flat z-extent means the dataset is effectively two-dimensional.
    dimension = 2 if abs(bounds[5]-bounds[4]) < 1e-5 else 3
    # Guard against degenerate zero z-spacing in 2D files.
    if dataset.GetSpacing()[2] == 0:
        spacing = dataset.GetSpacing()
        dataset.SetSpacing(spacing[0], spacing[1], 1)
    return (dataset, bounds, dimension)
def cutoutPattern(dataset, dimension, position, radius):
    """Resample a square (2D) or cube (3D) of half-width `radius` centred at
    `position` out of `dataset`.

    The pattern grid reuses the dataset's own spacing, so the cutout is
    sampled at native resolution.  (Removed two superseded, commented-out
    fixed-extent variants.)
    """
    pattern = vtk.vtkImageData()
    pattern.SetOrigin( position )
    pattern.SetSpacing( dataset.GetSpacing() )
    # Cells from centre to edge; the epsilon keeps radii that are an exact
    # multiple of the spacing from being rounded down by int().
    extent = int(radius/dataset.GetSpacing()[0]+1e-10)
    if dimension == 2:
        pattern.SetExtent( [-extent,extent,-extent,extent,0,0] )
    else:
        pattern.SetExtent( [-extent,extent,-extent,extent,-extent,extent] )
    output = sample(dataset, pattern)
    output.GetPointData().RemoveArray("vtkGhostType")
    return output
def createCoarseDataset(bounds, nx, ny, nz):
    """Build an empty uniform grid with nx*ny(*nz) points spanning `bounds`.

    Pass nz == 0 for a 2D grid.  Prints an error and returns None when the
    requested dimensionality does not match the bounds.
    """
    z_span = bounds[5] - bounds[4]
    if (nz == 0 and z_span > 1e-10) or (nz > 0 and z_span < 1e-10):
        print("ERROR: dimension of dataset and extent do not match")
        return
    # Work in cell counts (points minus one along each axis).
    cells_x = nx - 1
    cells_y = ny - 1
    datasetCoarse = vtk.vtkImageData()
    datasetCoarse.SetOrigin( bounds[0], bounds[2], bounds[4] )
    if nz == 0:
        cells_z = 0
        datasetCoarse.SetSpacing( 1./cells_x*(bounds[1]-bounds[0]), 1./cells_y*(bounds[3]-bounds[2]), 1 )
    else:
        cells_z = nz - 1
        datasetCoarse.SetSpacing( 1./cells_x*(bounds[1]-bounds[0]), 1./cells_y*(bounds[3]-bounds[2]), 1./cells_z*(bounds[5]-bounds[4]) )
    datasetCoarse.SetExtent( 0, cells_x, 0, cells_y, 0, cells_z )
    return datasetCoarse
def sample(dataset, grid):
    """Probe `dataset` at the points of `grid`; return the sampled grid."""
    prober = vtk.vtkProbeFilter()
    prober.SetInputData(grid)
    prober.SetSourceData(dataset)
    prober.Update()
    result = prober.GetOutput()
    # The probe's validity mask is not needed downstream.
    result.GetPointData().RemoveArray("vtkValidPointMask")
    return result
def scaleDataset(dataset, s, nameOfPointData):
    """Multiply every component of the named point-data array by scalar s.

    Modifies `dataset` in place and returns it.  Uses the generic
    vtkDataArray.SetTuple, which removes the three copy-pasted branches for
    1/3/9 components (identical results for those sizes) and works for any
    component count.
    """
    array = dataset.GetPointData().GetArray(nameOfPointData)
    for i in range(dataset.GetNumberOfPoints()):
        array.SetTuple(i, [component * s for component in array.GetTuple(i)])
    return dataset
def shiftDataset(dataset, s, nameOfPointData):
    """Add the offset sequence s component-wise to the named point-data array.

    `s` must have at least as many entries as the array has components.
    Modifies `dataset` in place and returns it.  Generic SetTuple replaces
    the three copy-pasted branches for 1/3/9 components (identical results
    for those sizes) and works for any component count.
    """
    array = dataset.GetPointData().GetArray(nameOfPointData)
    ncomp = array.GetNumberOfComponents()
    for i in range(dataset.GetNumberOfPoints()):
        values = array.GetTuple(i)
        array.SetTuple(i, [values[c] + s[c] for c in range(ncomp)])
    return dataset
# This uses the probe filter to map the rotated values back onto the original
# grid.  That lets the similarity end up at around 10e8 for vectors; for the
# other array types the similarity is higher.  See rotateDatasetExact for the
# precise multiples-of-90-degrees variant.
def rotateDataset(dataset, angle, nameOfPointData):
    """Rotate `dataset` by `angle` (radians) about the z-axis through its
    centre, resampling the result back onto the original grid.

    9-component (tensor) arrays additionally have their component values
    rotated via R * T * R^t.  NOTE: `dataset`'s origin is shifted and then
    restored on the result, and the input is fed through a transform filter.
    """
    bounds=[0]*6
    dataset.GetBounds(bounds)
    center = [0.5*(bounds[1]+bounds[0]), 0.5*(bounds[3]+bounds[2]), 0.5*(bounds[5]+bounds[4])]
    # Shift the origin so the rotation axis passes through the dataset centre.
    dataset.SetOrigin(dataset.GetOrigin()[0]-center[0],dataset.GetOrigin()[1]-center[1],dataset.GetOrigin()[2]-center[2])
    trans = vtk.vtkTransform()
    trans.RotateZ(angle*180/math.pi)  # VTK expects degrees
    tf = vtk.vtkTransformFilter()
    tf.SetTransform(trans)
    tf.SetInputData(dataset);
    tf.Update()
    result = vtk.vtkImageData()
    result.DeepCopy(dataset)
    # Shrink the spacing by a tiny epsilon so every probe point lies strictly
    # inside the rotated source, avoiding invalid boundary samples.
    result.SetSpacing(dataset.GetSpacing()[0]*(1-1e-10),dataset.GetSpacing()[1]*(1-1e-10),dataset.GetSpacing()[2]*(1-1e-10))
    result = sample(tf.GetOutput(),result)
    result.SetSpacing(dataset.GetSpacing())
    # Undo the temporary origin shift on the result.
    result.SetOrigin(dataset.GetOrigin()[0]+center[0],dataset.GetOrigin()[1]+center[1],dataset.GetOrigin()[2]+center[2])
    if result.GetPointData().GetArray(nameOfPointData).GetNumberOfComponents() == 9:
        # Rotate tensor components: T' = R T R^t.
        array = result.GetPointData().GetArray(nameOfPointData)
        rotMat = numpy.array([[numpy.cos(angle), -numpy.sin(angle),0], [numpy.sin(angle), numpy.cos(angle),0], [0,0,1]])
        for i in range(dataset.GetNumberOfPoints()):
            value = numpy.array(array.GetTuple(i)).reshape(3,3)
            value = numpy.dot(rotMat,numpy.dot(value,rotMat.transpose()))
            value = value.reshape(9,1)
            array.SetTuple9( i, value[0], value[1], value[2], value[3], value[4], value[5], value[6], value[7], value[8] );
    return result
# If we know that only multiples of 90 degrees are used, the rotation can be
# made more precise (10e13) by using the direct index correspondence -- but
# then the vectors have to be rotated by hand (like the matrices).
def rotateDatasetExact(dataset, angle, nameOfPointData):
    """Rotate `dataset` by `angle` about z via exact index remapping.

    `angle` must be a multiple of pi/2 (0, 90, 180, 270 degrees); other
    angles leave the data untouched.  Vector (3) and tensor (9) component
    values are rotated explicitly afterwards.

    NOTE(review): the index arithmetic uses the x-dimension `n` for both
    in-plane directions, i.e. it assumes a square x/y grid -- confirm for
    non-square data.
    """
    result = vtk.vtkImageData()
    result.DeepCopy(dataset)
    bounds = numpy.zeros(6)
    result.GetBounds(bounds)
    n = result.GetDimensions()[0]
    m = result.GetDimensions()[1]
    for l in range(result.GetNumberOfPoints()):
        # Recover (i, j, k) grid indices from the flat point id; a flat
        # z-extent means 2D data.
        if bounds[5]-bounds[4] < 1e-5:
            i = l % n
            j = l // n
            k = 0
        else:
            i = l % n
            j = (l // m) % n
            k = l // (n*m)
        # print l, i, j, k, i+j*n+k*n*m, (n-1-i)*n+j+k*n*m, (n-1-i)+(n-1-j)*n+k*n*m, i*n+(n-1-j)+k*n*m
        # Copy from the source index that rotates onto (i, j, k) for
        # 0, 90, 180 and 270 degrees respectively.
        if abs(angle * 2 / math.pi - 0) < 1e-5:
            result.GetPointData().GetArray(nameOfPointData).SetTuple(l,dataset.GetPointData().GetArray(nameOfPointData).GetTuple(i+j*n+k*n*m))
        if abs(angle * 2 / math.pi - 1) < 1e-5:
            result.GetPointData().GetArray(nameOfPointData).SetTuple(l,dataset.GetPointData().GetArray(nameOfPointData).GetTuple((n-1-i)*n+j+k*n*m))
        if abs(angle * 2 / math.pi - 2) < 1e-5:
            result.GetPointData().GetArray(nameOfPointData).SetTuple(l,dataset.GetPointData().GetArray(nameOfPointData).GetTuple((n-1-i)+(n-1-j)*n+k*n*m))
        if abs(angle * 2 / math.pi - 3) < 1e-5:
            result.GetPointData().GetArray(nameOfPointData).SetTuple(l,dataset.GetPointData().GetArray(nameOfPointData).GetTuple(i*n+(n-1-j)+k*n*m))
    if result.GetPointData().GetArray(nameOfPointData).GetNumberOfComponents() == 3:
        # Rotate vector components: v' = R v.
        array = result.GetPointData().GetArray(nameOfPointData)
        rotMat = numpy.array([[numpy.cos(angle), -numpy.sin(angle),0], [numpy.sin(angle), numpy.cos(angle),0], [0,0,1]])
        for i in range(dataset.GetNumberOfPoints()):
            value = numpy.array(array.GetTuple(i)).reshape(3,1)
            value = numpy.dot(rotMat,value)
            result.GetPointData().GetArray(nameOfPointData).SetTuple3( i, value[0], value[1], value[2] );
    if result.GetPointData().GetArray(nameOfPointData).GetNumberOfComponents() == 9:
        # Rotate tensor components: T' = R T R^t.
        array = result.GetPointData().GetArray(nameOfPointData)
        rotMat = numpy.array([[numpy.cos(angle), -numpy.sin(angle),0], [numpy.sin(angle), numpy.cos(angle),0], [0,0,1]])
        for i in range(dataset.GetNumberOfPoints()):
            value = numpy.array(array.GetTuple(i)).reshape(3,3)
            value = numpy.dot(rotMat,numpy.dot(value,rotMat.transpose()))
            value = value.reshape(9,1)
            array.SetTuple9( i, value[0], value[1], value[2], value[3], value[4], value[5], value[6], value[7], value[8] );
    return result
def myNorm(tuple):
    """Return the Euclidean (L2) norm of a sequence of numbers.

    :param tuple: any sequence of numeric components.
    :return: sqrt of the sum of squared components; 0.0 for an empty sequence.
    """
    # NOTE(review): the parameter name shadows the builtin `tuple`; kept
    # unchanged for backward compatibility with keyword callers.
    # Idiom fix: replace the manual accumulation loop with sum().
    return math.sqrt(sum(c * c for c in tuple))
def myAbst(x, y):
    """Return the Euclidean distance between the first three components
    of point ``x`` and point ``y``."""
    # Inlines the former myNorm(x - y) computation over the 3 axes.
    return math.sqrt(sum((x[i] - y[i]) ** 2 for i in range(3)))
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 22 01:02:06 2018
@author: Gunari Bharath
"""
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from matplotlib import pyplot as plt
# Load the data set (comma separated); keep only feature columns 1..9,
# dropping the leading ID column.
file = "bc.txt"
data = np.loadtxt(file, delimiter = ",")
data = data[:,1:10]
# Candidate numbers of clusters to evaluate.
ks = [2,3,4,5,6,7,8]
def GetCentroids():
    """Recompute one centroid per cluster as the mean of the rows of the
    module-level ``data`` currently assigned to it via ``indecis``.

    Relies on the module-level globals ``k``, ``data`` and ``indecis``.
    """
    new_centroids = []
    for cluster in range(k):
        members = [data[row] for row in range(len(indecis)) if indecis[row] == cluster]
        new_centroids.append(np.mean(members, axis = 0))
    return new_centroids
# Run k-means for each candidate k and record the final potential
# (sum of squared distances of each point to its assigned centroid).
func = []
for k in ks:
    # Random initial centroids: k rows of 9 features in [0, 10).
    centroids = np.random.randint(0,10,(k,9))
    diff = 2
    # Alternate assignment/update steps until the centroids stop moving.
    while(diff != 0):
        distances = cdist(data,centroids)
        indecis = np.argmin(distances, axis = 1)
        newcentroids = GetCentroids()
        diff = np.absolute(np.sum(np.subtract(newcentroids,centroids)))
        centroids = newcentroids
    centroids = np.round(centroids)
    # Potential function: sum of squared point-to-centroid distances.
    distances_2 = np.square(cdist(data,centroids))
    sum2 = 0
    for i in range(k):
        sum1 = 0
        for j in range(len(indecis)):
            if i == indecis[j]:
                sum1 += distances_2[j][i]
        sum2 += sum1
    print("The value of the potential function for k ", k , " is " , sum2)
    func.append(sum2)
# Elbow plot of the potential function against k.
fig,ax=plt.subplots()
plt.title("Potential Function vs k")
plt.xlabel("values k ")
plt.ylabel("Potential functions ")
ax.plot(ks,func, '*-',label="K Means Classifier")
ax.legend()
plt.show()
|
# coding=utf-8
# Copyright 2019 StrTrek Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# System Required
from datetime import datetime, timedelta
import time
# Outer Required
# import msgpack
# Inner Required
# Global Parameters
from Babelor.Config import CONFIG
class TASKS:
    """A small scheduler that repeatedly runs a collection of TASK objects.

    ``start()`` loops while at least one task still has a future run left,
    sleeping ``interval`` seconds between scheduling passes.
    """
    def __init__(self, interval: int = CONFIG.TASK_BLOCK_TIME):
        self.tasks = []            # scheduled TASK instances
        self.interval = interval   # seconds to sleep between passes
        self.active = True         # cleared once no task will ever run again
    def run_tasks(self):
        """Run every due task (earliest next-run first), then deactivate the
        scheduler if no task has a future run remaining."""
        runnable_tasks = [task for task in self.tasks if task.should_run()]
        # BUG FIX: TASK defined no ordering, so sorted(runnable_tasks) raised
        # TypeError whenever two or more tasks were due at once; sort by the
        # next scheduled run time explicitly.
        for task in sorted(runnable_tasks, key=lambda t: t.next_run_datetime):
            task.run()
        # --------------------
        if not any(task.next_should_run() for task in self.tasks):
            self.active = False
    def add(self, start: datetime, delta: timedelta, func: callable,
            expired: int = CONFIG.TASK_MAX_RUN_TIMES, **kwargs):
        """Register ``func`` to run every ``delta`` starting at ``start``,
        for at most ``expired`` runs; ``kwargs`` are forwarded to ``func``."""
        task = TASK(start, delta, expired, func, **kwargs)
        self.tasks.append(task)
    def start(self):
        """Block, running due tasks until none remain active."""
        while self.active:
            self.run_tasks()
            time.sleep(self.interval)
        else:
            # Re-arm so the scheduler can be started again later.
            self.active = True
class TASK:
    """A single repeating task.

    Runs ``func(**kwargs)`` once per ``delta`` starting at ``start + delta``
    until the expiry datetime derived from ``expired`` is reached.
    """
    def __init__(self, start: datetime, delta: timedelta, expired: int, func: callable, **kwargs):
        self.task_func = func
        self.last_run_datetime = datetime.now()  # datetime of the last run
        self.next_run_datetime = start + delta   # datetime of the next run
        self.timedelta = delta                   # timedelta between runs
        self.start_datetime = start              # specific datetime to start on
        self.expire_datetime = start + (delta * (expired + 1))  # specific datetime to expired
        self.run_times = 0                       # completed run count
        self.kwargs = kwargs                     # forwarded to task_func on every run
    def __lt__(self, other):
        # CONSISTENCY FIX: TASKS.run_tasks() calls sorted() on TASK objects,
        # which requires an ordering; order tasks by their next scheduled run.
        return self.next_run_datetime < other.next_run_datetime
    def run(self):
        """Invoke the task function (if any) and schedule the next run."""
        if self.task_func is not None:
            self.task_func(**self.kwargs)
        self.last_run_datetime = datetime.now()
        self.schedule_next_run()
    def next_should_run(self):
        """Return True while at least one future run remains before expiry."""
        return self.next_run_datetime < self.expire_datetime
    def should_run(self):
        """Return True when the task is due (next run reached) and not expired."""
        current_datetime = datetime.now()
        is_not_expired = current_datetime < self.expire_datetime
        is_not_run = current_datetime >= self.next_run_datetime
        return is_not_expired and is_not_run
    def schedule_next_run(self):
        """Advance next_run_datetime past now; missed slots are skipped,
        not replayed."""
        self.run_times += 1
        self.next_run_datetime = self.start_datetime + (self.timedelta * self.run_times)
        if self.next_run_datetime < datetime.now():
            self.schedule_next_run()
|
# CMSSW job configuration: run the 'Compare' EDAnalyzer over one empty event.
import FWCore.ParameterSet.Config as cms
process = cms.Process("Demo")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.demo = cms.EDAnalyzer('Compare')
# Process exactly one event.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)
# No input file; events are generated empty.
process.source = cms.Source("EmptySource")
process.output = cms.OutputModule("AsciiOutputModule")
process.p = cms.Path(process.demo)
process.ep = cms.EndPath(process.output)
|
# Camera thread
# imports
# import own
#from trackers.camshifttracker import CAMShiftTracker
import numpy
import cv2
import sys
import threading
import time
# Feature toggles for the debug overlay.
show_fps = True
show_combo = True
show_detection = False
show_tracking = False
show_landmarks = True
frame = None
stream_reader_thread = None
showbackprojectedFrame = True
fps = 0
# Open the default camera (device 0).
camera_capture = cv2.VideoCapture(0)
while True:
    if camera_capture.isOpened():
        # Time a single frame grab to estimate the capture frame rate.
        start = time.time()
        ret_val, frame = camera_capture.read()
        end = time.time()
        seconds = end - start
        if seconds != 0:  # guard against a zero-duration read
            fps = round(1 / seconds, 2)
        if show_fps:
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, str(fps), (0, 100), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
    # show frame
    if frame is not None:
        cv2.imshow("test", frame)
    # BUG FIX: the original called cv2.waitKey() twice per iteration, so a key
    # press could be consumed by the wrong call and its exit check would never
    # fire.  Poll once and test both exit keys.
    key = cv2.waitKey(1) & 0xFF
    if key == 27 or key == ord('q'):  # Esc or 'q' quits
        break
# stop camera
camera_capture.release()
cv2.destroyAllWindows()
|
from enyo.environment import Company, Market, TheGame
def _base_space_params():
    """Return a fresh copy of the SPACE-analysis parameter set shared by
    the incumbent companies; callers may tweak individual scores.

    Extracted because the original repeated this dict verbatim three times.
    """
    return {
        'competitive_advantage': {
            'market_share': 5,
            'product_quality': 4,
            'product_life_cycle': 2,
            'product_replacement_cycle': 3,
            'customer_loyalty': 4,
            'know_how': 4,
            'vertical_integration': 3,
        },
        'industry_attractiveness': {
            'growth_potential': 4,
            'profit_potential': 4,
            'financial_stability': 4,
            'know_how': 4,
            'resource_utilization': 3,  # inefficient to efficient
            'capital_intensity': 3,
            'ease_of_entry': 5,  # easy to difficult
            'capacity_utilization': 4,
        },
        'environmental_stability': {
            'technological_changes': 2,
            'rate_of_inflation': 3,
            'demand_variability': 3,
            'barriers_to_entry': 4,
            'competitive_pressure': 2,
            'price_elasticity_of_demand': 1,
            'pressure_from_substitutes': 4
        },
        'financial_strength': {
            'ROI': 5,
            'leverage': 3,
            'liquidity': 2,
            'required_to_available_capital': 2,
            'cash_flow': 4,
            'ease_of_exit': 5,
            'risk_doing_business': 4,
            'inventory_turnover': 4,
        }
    }


market = Market({'market_value': 1000000000000, 'growth_rate': 0.1})
market.add_customers()

# The two large incumbents use identical SPACE parameters.
our_company = Company('ours', .4)
our_company.set_space_params(_base_space_params())

other_company = Company('other1', .4)
other_company.set_space_params(_base_space_params())

# The small competitor differs only in its competitive-advantage scores.
_params2 = _base_space_params()
_params2['competitive_advantage'].update({
    'market_share': 2,
    'product_replacement_cycle': 2,
    'product_quality': 2,
    'product_life_cycle': 3,
})
other_company2 = Company('other2', .2)
other_company2.set_space_params(_params2)

# TODO add products
thegame = TheGame(market, [our_company, other_company, other_company2])
a = 'abab'
b = 'aba'
if b in a:
print b
|
import logging
from pajbot.managers.db import DBManager
from pajbot.models.command import Command
from pajbot.models.command import CommandExample
from pajbot.models.user import User
from pajbot.modules import BaseModule
from pajbot.modules.basic import BasicCommandsModule
log = logging.getLogger(__name__)
class PointsResetModule(BaseModule):
    """Chat module exposing !pointsreset, which zeroes the point balance of
    a user whose balance is negative."""

    ID = __name__.split(".")[-1]
    NAME = "!pointsreset"
    DESCRIPTION = "Reset points from a user with negative points."
    CATEGORY = "Feature"
    PARENT_MODULE = BasicCommandsModule

    @staticmethod
    def points_reset(bot, source, message, **options):
        """Reset the points of the user named in ``message`` to zero, but
        only if that user currently has negative points."""
        if message is None or len(message) == 0:
            return
        username = message.split(" ")[0]
        if len(username) < 2:
            return
        with DBManager.create_session_scope() as db_session:
            victim = User.find_by_user_input(db_session, username)
            if victim is None:
                bot.whisper(source, "This user does not exist FailFish")
                return
            if victim.points >= 0:
                bot.whisper(source, f"{victim} doesn't have negative points FailFish")
                return
            # BUG FIX: the original additionally required points <= -1, which
            # silently ignored any balance strictly between -1 and 0 (no reset
            # and no feedback).  Any negative balance is now reset.
            old_points = victim.points
            victim.points = 0
            bot.whisper(source, f"You changed the points for {victim} from {old_points} to {victim.points} points")

    def load_commands(self, **options):
        """Register the !pointsreset command (level 500, whisper-capable)."""
        self.commands["pointsreset"] = Command.raw_command(
            self.points_reset,
            delay_all=0,
            delay_user=5,
            level=500,
            description="Reset points from a user with negative points.",
            can_execute_with_whisper=1,
            command="pointsreset",
            examples=[
                CommandExample(
                    None,
                    "Reset points from a user with negative points.",
                    chat="user:!pointsreset pajtest\n"
                    "bot>user:You changed the points for pajtest from -10000 to 0 points",
                    description="",
                ).parse()
            ],
        )
|
# This script scrape the moon changing phases (full, new) from timeanddate.com
# And print the information.
import urllib2
from BeautifulSoup import BeautifulSoup
def GetPhases():
    """Scrape timeanddate.com and print the date of each upcoming
    Full Moon / New Moon (Python 2, BeautifulSoup 3)."""
    # Set the source of the information
    source = 'https://www.timeanddate.com/moon/phases/'
    req = urllib2.Request(source)
    # Set http header to get the page in english
    req.add_header('Accept-Language', 'en-US')
    # Get the page data
    soup = BeautifulSoup(urllib2.urlopen(req).read())
    # Get the rows of the table that contains the moon phases
    therows = soup('table', {'id': 'mn-cyc'})[0].tbody('tr')
    # The first row contains the name of the event (Full Moon, New Moon, ...)
    events_row = therows[0]
    # The third row contains the dates and times of the occurring events
    # Get all the cells from that third row
    date_tabledata = therows[2].findChildren('td')
    # Loop through all phases
    for index, td in enumerate(events_row):
        phase_name = td.a.string
        # Check the phase name in lower case without spaces
        if ''.join(phase_name.lower().split(' ')) in ['fullmoon', 'newmoon']:
            # It is a full or new moon; drop the trailing time (last 5 chars)
            date_occurence = date_tabledata[index].text[:-5]
            print phase_name, date_occurence
if __name__ == "__main__":
    GetPhases()
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.advanced_construction import SurfacePropertyOtherSideCoefficients
log = logging.getLogger(__name__)
class TestSurfacePropertyOtherSideCoefficients(unittest.TestCase):
    """Round-trip test: populate a SurfacePropertyOtherSideCoefficients
    object, save it to an IDF file, reload it, and verify every field."""

    def setUp(self):
        # Temp file the IDF is written to during the test.
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        # BUG FIX: close the descriptor returned by mkstemp(); the original
        # leaked one open file descriptor per test run.
        os.close(self.fd)
        os.remove(self.path)

    def test_create_surfacepropertyothersidecoefficients(self):
        """Set every field, save, reload, and compare field by field."""
        pyidf.validation_level = ValidationLevel.error

        obj = SurfacePropertyOtherSideCoefficients()
        # alpha
        var_name = "Name"
        obj.name = var_name
        # real
        var_combined_convective_or_radiative_film_coefficient = 2.2
        obj.combined_convective_or_radiative_film_coefficient = var_combined_convective_or_radiative_film_coefficient
        # real
        var_constant_temperature = 3.3
        obj.constant_temperature = var_constant_temperature
        # real
        var_constant_temperature_coefficient = 4.4
        obj.constant_temperature_coefficient = var_constant_temperature_coefficient
        # real
        var_external_drybulb_temperature_coefficient = 5.5
        obj.external_drybulb_temperature_coefficient = var_external_drybulb_temperature_coefficient
        # real
        var_ground_temperature_coefficient = 6.6
        obj.ground_temperature_coefficient = var_ground_temperature_coefficient
        # real
        var_wind_speed_coefficient = 7.7
        obj.wind_speed_coefficient = var_wind_speed_coefficient
        # real
        var_zone_air_temperature_coefficient = 8.8
        obj.zone_air_temperature_coefficient = var_zone_air_temperature_coefficient
        # object-list
        var_constant_temperature_schedule_name = "object-list|Constant Temperature Schedule Name"
        obj.constant_temperature_schedule_name = var_constant_temperature_schedule_name
        # alpha
        var_sinusoidal_variation_of_constant_temperature_coefficient = "Yes"
        obj.sinusoidal_variation_of_constant_temperature_coefficient = var_sinusoidal_variation_of_constant_temperature_coefficient
        # real
        var_period_of_sinusoidal_variation = 0.0001
        obj.period_of_sinusoidal_variation = var_period_of_sinusoidal_variation
        # real
        var_previous_other_side_temperature_coefficient = 12.12
        obj.previous_other_side_temperature_coefficient = var_previous_other_side_temperature_coefficient
        # real
        var_minimum_other_side_temperature_limit = 13.13
        obj.minimum_other_side_temperature_limit = var_minimum_other_side_temperature_limit
        # real
        var_maximum_other_side_temperature_limit = 14.14
        obj.maximum_other_side_temperature_limit = var_maximum_other_side_temperature_limit

        # Write the object out, echo the file for debugging, then reload it.
        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)

        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())

        idf2 = IDF(self.path)
        self.assertEqual(idf2.surfacepropertyothersidecoefficientss[0].name, var_name)
        self.assertAlmostEqual(idf2.surfacepropertyothersidecoefficientss[0].combined_convective_or_radiative_film_coefficient, var_combined_convective_or_radiative_film_coefficient)
        self.assertAlmostEqual(idf2.surfacepropertyothersidecoefficientss[0].constant_temperature, var_constant_temperature)
        self.assertAlmostEqual(idf2.surfacepropertyothersidecoefficientss[0].constant_temperature_coefficient, var_constant_temperature_coefficient)
        self.assertAlmostEqual(idf2.surfacepropertyothersidecoefficientss[0].external_drybulb_temperature_coefficient, var_external_drybulb_temperature_coefficient)
        self.assertAlmostEqual(idf2.surfacepropertyothersidecoefficientss[0].ground_temperature_coefficient, var_ground_temperature_coefficient)
        self.assertAlmostEqual(idf2.surfacepropertyothersidecoefficientss[0].wind_speed_coefficient, var_wind_speed_coefficient)
        self.assertAlmostEqual(idf2.surfacepropertyothersidecoefficientss[0].zone_air_temperature_coefficient, var_zone_air_temperature_coefficient)
        self.assertEqual(idf2.surfacepropertyothersidecoefficientss[0].constant_temperature_schedule_name, var_constant_temperature_schedule_name)
        self.assertEqual(idf2.surfacepropertyothersidecoefficientss[0].sinusoidal_variation_of_constant_temperature_coefficient, var_sinusoidal_variation_of_constant_temperature_coefficient)
        self.assertAlmostEqual(idf2.surfacepropertyothersidecoefficientss[0].period_of_sinusoidal_variation, var_period_of_sinusoidal_variation)
        self.assertAlmostEqual(idf2.surfacepropertyothersidecoefficientss[0].previous_other_side_temperature_coefficient, var_previous_other_side_temperature_coefficient)
        self.assertAlmostEqual(idf2.surfacepropertyothersidecoefficientss[0].minimum_other_side_temperature_limit, var_minimum_other_side_temperature_limit)
        self.assertAlmostEqual(idf2.surfacepropertyothersidecoefficientss[0].maximum_other_side_temperature_limit, var_maximum_other_side_temperature_limit)
from django.test import TestCase
# Create your tests here.
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from neighborhood.models import Neighborhood
class NeighborhoodTest(TestCase):
    """CRUD smoke tests for the neighborhood JSON endpoints."""

    def setUp(self):
        self.user = User.objects.create(username='test_user')

    def tearDown(self):
        self.user.delete()

    def test_list(self):
        """The list endpoint responds with HTTP 200."""
        # FIX: failUnlessEqual is a long-deprecated unittest alias (removed
        # in Python 3.12); use assertEqual throughout.
        response = self.client.get(reverse('neighborhood-list'))
        self.assertEqual(response.status_code, 200)

    def test_crud(self):
        """Create, read, update and delete a Neighborhood through the views."""
        # Create new instance
        response = self.client.post(reverse('neighborhood-list'), {})
        self.assertContains(response, '"success": true')
        # Read instance
        items = Neighborhood.objects.all()
        self.assertEqual(items.count(), 1)
        item = items[0]
        response = self.client.get(reverse('neighborhood-details', kwargs={'id': item.id}))
        self.assertEqual(response.status_code, 200)
        # Update instance
        response = self.client.post(reverse('neighborhood-details', kwargs={'id': item.id}), {})
        self.assertContains(response, '"success": true')
        # Delete instance
        response = self.client.post(reverse('neighborhood-delete', kwargs={'id': item.id}), {})
        self.assertContains(response, '"success": true')
        items = Neighborhood.objects.all()
        self.assertEqual(items.count(), 0)
|
#!/usr/bin/env python3
"""Convert an image using Pillow.
requirements: Pillow
`pip install Pillow`.
"""
import os
from PIL import Image
def save_image(picture, dirpath, name, ext):
    """Save an image.

    :param picture: PIL.Image object
    :param dirpath: Directory to save image.
    :param name: Name of the image (with or without an extension).
    :param ext: File extension to use.
    :return: None
    """
    # BUG FIX: rpartition('.') returns an empty head when `name` contains no
    # dot, which produced a hidden file like "dir/.png"; fall back to the
    # full name in that case.
    stem = name.rpartition('.')[0] or name
    picture.save(f'{dirpath}/{stem}.{ext}')
def main(file):
    """Save an image in png format.

    This is an example function.

    :file: Full path of image file.
    :return: None
    """
    folder, _, fname = file.rpartition('/')
    img = Image.open(file)
    save_image(img, folder, fname, 'png')
if __name__ == "__main__":
    # Sample image source(https://unsplash.com/photos/70Rir5vB96U)
    # Demo: convert the bundled sample JPEG to PNG next to the original.
    file = os.path.realpath(
        'images/example/photo-1533709752211-118fcaf03312.jpeg')
    main(file)
|
"""
Django settings for density project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from pathlib import Path
from django.core.exceptions import ImproperlyConfigured
def get_env_variable(name):
    """
    Get the value of the environment variable named `name` or raise an exception.
    """
    value = os.environ.get(name)
    if value is None:
        raise ImproperlyConfigured(
            f"The environment variable “{name}” is not set. It must be set in order for "
            "density to function."
        )
    return value
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
CONFIG_DIR = BASE_DIR / "config"
SOURCE_DIR = BASE_DIR / "source"
TEMPLATES_DIR = BASE_DIR / "templates"
STATICFILES_DIRS = [BASE_DIR / "static"]
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_env_variable("DENSITY_SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
# Default to False so we don't accidentally run DEBUG in production.
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "dpu",
]
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "config.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [TEMPLATES_DIR],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "config.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# All connection details come from DENSITY_* environment variables.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": get_env_variable("DENSITY_DB_NAME"),
        "USER": get_env_variable("DENSITY_DB_USER"),
        "PASSWORD": get_env_variable("DENSITY_DB_PASSWORD"),
        "HOST": get_env_variable("DENSITY_DB_HOST"),
        "PORT": get_env_variable("DENSITY_DB_PORT"),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/static/"
OCCUPANCY_SETTLED_THRESHOLD = 5 * 60  # Five minutes
|
import os
import datetime
import argparse
import dateutil.relativedelta
def main():
    """Walk from --start-date to --end-date in --interval steps and print a
    size/count summary (via ``aws s3 ls``) for each dated S3 prefix."""
    # argparse configuration
    parser = argparse.ArgumentParser(
        description='example: $ python app.py --start-date "2019-01-03" --end-date "2019-01-04"' \
        ' --bucket higee --path incoming --interval month --delta 1 --profile higee'
    )
    parser.add_argument("--start-date", required=True, help="pass in YYYY-MM-DD format")
    parser.add_argument("--end-date", required=True, help="pass in YYYY-MM-DD format")
    parser.add_argument("--bucket", required=True, help="pass the name of S3 bucket")
    parser.add_argument("--path", required=True, help="pass the path under the S3 bucket")
    parser.add_argument("--interval", required=True, choices=['year', 'month', 'day', 'hour'])
    parser.add_argument("--delta", required=False, default=1, type=int, help="pass the value you want to increment interval by")
    parser.add_argument("--profile", required=True, help="pass the profile allowed to access the S3 bucket")
    args = parser.parse_args()
    # user-input arguments configuration
    start_date = datetime.datetime.strptime(args.start_date, '%Y-%m-%d')
    end_date = datetime.datetime.strptime(args.end_date, '%Y-%m-%d') + dateutil.relativedelta.relativedelta(hours=23)
    # How many date components make up the S3 prefix for each interval.
    # (Consolidates the two duplicated if/elif ladders of the original.)
    parts_for = {'year': 1, 'month': 2, 'day': 3, 'hour': 4}
    while start_date <= end_date:
        # Build e.g. "2019/01/03/05" using as many components as needed.
        components = [f"{start_date.year:02d}", f"{start_date.month:02d}",
                      f"{start_date.day:02d}", f"{start_date.hour:02d}"]
        date_path = "/".join(components[:parts_for[args.interval]])
        # NOTE(review): CLI args are interpolated into a shell command; fine
        # for a personal tool, but never feed untrusted input here.
        cmd = f"aws s3 ls --summarize --human-readable --recursive {args.bucket}/{args.path}/{date_path}/ --profile {args.profile} | grep Total"
        # print meta-data
        print('='*50)
        print(f"bucket : {args.bucket}")
        print(f"object path : {'/'.join([args.path, date_path, ''])}")
        print('='*50)
        # run the command and print basic stats of S3 objects
        os.system(cmd)
        print('\n')
        # increment interval by delta, e.g. relativedelta(months=args.delta)
        start_date += dateutil.relativedelta.relativedelta(**{args.interval + 's': args.delta})


if __name__ == "__main__":
    main()
|
"""
___ __ __ _ _____ _ _ _
|_ _| \/ |_ __ _ __ _____ _____ __| | ____|__| (_) |_ ___ _ __
| || |\/| | '_ \| '__/ _ \ \ / / _ \/ _` | _| / _` | | __/ _ \| '__|
| || | | | |_) | | | (_) \ V / __/ (_| | |__| (_| | | || (_) | |
|___|_| |_| .__/|_| \___/ \_/ \___|\__,_|_____\__,_|_|\__\___/|_|
|_|
D e f a u l t C o n f i g u r a t i o n
Default IME configuration by darhsn
"""
import ime
# keystroke_handler, handle all the keystrokes, is called every time user presses a key
def keystroke_handler(key):
    """Handle one keystroke; pressing 'q' quits the editor."""
    if key != ord('q'):
        return
    editor.quit()
# create ime instance
# FIX: pass the handler directly instead of wrapping it in an equivalent
# one-argument lambda.
editor = ime.ImeInstance(keystroke_handler=keystroke_handler)
# start ime
editor.start()
|
# Track cars currently inside a parking lot from n "IN"/"OUT" events,
# then print the plates still parked.
n = int(input())
cars = set()
for _ in range(n):
    direction, car_number = input().split(", ")
    if direction == "IN":
        cars.add(car_number)
    elif direction == "OUT":
        # discard() is a no-op for unknown plates, replacing the explicit
        # membership check + remove().
        cars.discard(car_number)
if cars:
    # IDIOM FIX: the original used a list comprehension purely for its
    # side effect ([print(car) for car in cars]); use a plain loop.
    for car in cars:
        print(car)
else:
    print("Parking Lot is Empty")
|
"""
Shortest path algorithm implemented using BFS
"""
from __future__ import annotations

from collections import deque

from unweighted_graphs import Graph
def shortest_path(graph: Graph, start: int, dest: int):
    """
    Finds shortest path from one node to another in
    an unweighted graph
    :param graph: unweighted graph to perform algorithm on
    :param start: vertex to start from
    :param dest: vertex to reach
    :return: minimum number of edges to traverse in order
        to reach the dest from the point of start
    >>> graph = Graph(5)
    >>> graph.add_edge(0, 1)
    >>> graph.add_edge(1, 2)
    >>> graph.add_edge(2, 3)
    >>> graph.add_edge(3, 4)
    >>> print(shortest_path(graph, 0, 4))
    3
    >>> graph = Graph(5)
    >>> graph.add_edge(0, 1)
    >>> graph.add_edge(0, 2)
    >>> graph.add_edge(1, 2)
    >>> graph.add_edge(1, 3)
    >>> graph.add_edge(2, 4)
    >>> graph.add_edge(3, 4)
    >>> print(shortest_path(graph, 0, 4))
    2
    """
    # dist[v] is the edge count from `start`; -1 marks "not yet visited"
    # (and is returned unchanged for unreachable destinations).
    dist = [-1] * graph.vertices
    dist[start] = 0
    # PERF FIX: list.pop(0) is O(n) per dequeue; deque.popleft() is O(1).
    queue = deque([start])
    while queue:
        curr_vertex = queue.popleft()
        # Visit every unvisited neighbor of the current vertex.
        for neighbor in graph.edges[curr_vertex]:
            if dist[neighbor] == -1:
                dist[neighbor] = dist[curr_vertex] + 1
                queue.append(neighbor)
    return dist[dest]
def main():
    """Build a small demo graph by hand and print the 0 -> 4 distance."""
    demo = Graph(5)
    demo.edges = [
        [1],
        [3],
        [4],
        [4, 1],
        [0, 1]
    ]
    print(shortest_path(demo, 0, 4))


if __name__ == "__main__":
    main()
|
from conjureup.controllers.base.addons.gui import AddonsController

# Hook consumed by the conjure-up controller loader to pick the GUI class.
_controller_class = AddonsController
|
#!/usr/bin/python
import os
from git import Repo
import git
# INSTALL PATH
llvm_path = "~"
llvm_dir = os.path.join(llvm_path, "llvm")
clang_dir = os.path.join(llvm_dir, "tools/clang")
compiler_rt_dir = os.path.join(llvm_dir, "projects/compiler-rt")
testsuite_dir = os.path.join(llvm_dir, "projects/test-suite")
clang_extra_dir = os.path.join(llvm_dir, "tools/clang/tools/extra")
libcxx_dir = os.path.join(llvm_dir, "projects/libcxx")
libcxxabi_dir = os.path.join(llvm_dir, "projects/libcxxabi")
lld_dir = os.path.join(llvm_dir, "tools/lld")
lldb_dir = os.path.join(llvm_dir, "tools/lldb")
# TOOLS TO INSTALL
clang = 1
clang_extra = 1
compiler_rt = 1
libcxx = 1
libcxxabi = 1
lld = 0
lldb = 0
testsuite = 1
# REPO
repo_llvm = "http://llvm.org/git/llvm.git"
repo_clang = "http://llvm.org/git/clang.git"
repo_clang_extra = "http://llvm.org/git/clang-tools-extra.git"
repo_compiler_rt = "http://llvm.org/git/compiler-rt.git"
repo_libcxx = "http://llvm.org/git/libcxx.git"
repo_libcxxabi = "http://llvm.org/git/libcxxabi.git"
repo_lld = "http://llvm.org/git/lld.git"
repo_lldb = "http://llvm.org/git/lldb.git"
repo_testsuite = "http://llvm.org/git/test-suite.git"
# CLONING
if(not os.path.exists(llvm_dir)):
print "Cloning llvm to " + llvm_dir
Repo.clone_from(repo_llvm, llvm_dir)
if(not os.path.exists(clang_dir) and clang):
print "Cloning clang to " + clang_dir
Repo.clone_from(repo_clang, clang_dir)
if(not os.path.exists(clang_extra_dir) and clang_extra):
print "Cloning clang extra tools to " + clang_extra_dir
Repo.clone_from(repo_clang_extra, clang_extra_dir)
if(not os.path.exists(compiler_rt_dir) and compiler_rt):
print "Cloning compiler-rt to " + compiler_rt_dir
Repo.clone_from(repo_compiler_rt, compiler_rt_dir)
if(not os.path.exists(libcxx_dir) and libcxx):
print "Cloning libc++ to " + libcxx_dir
Repo.clone_from(repo_libcxx, libcxx_dir)
if(not os.path.exists(libcxxabi_dir) and libcxxabi):
print "Cloning libc++ ABI to " + libcxxabi_dir
Repo.clone_from(repo_libcxxabi, libcxxabi_dir)
if(not os.path.exists(lld_dir) and lld):
print "Cloning lld to " + lld_dir
Repo.clone_from(repo_lld, lld_dir)
if(not os.path.exists(lldb_dir) and lldb):
print "Cloning lldb to " + lldb_dir
Repo.clone_from(repo_lldb, lldb_dir)
if(not os.path.exists(testsuite_dir) and testsuite):
print "Cloning llvm test suite to " + testsuite_dir
Repo.clone_from(repo_testsuite, testsuite_dir)
# UPDATING
print "Updating LLVM"
git.cmd.Git(llvm_dir).pull()
if(clang):
print "Updating clang"
git.cmd.Git(clang_dir).pull()
if(clang_extra):
print "Updating clang extra tools"
git.cmd.Git(clang_extra_dir).pull()
if(compiler_rt):
print "Updating compiler-rt"
git.cmd.Git(compiler_rt_dir).pull()
if(libcxx):
print "Updating libc++"
git.cmd.Git(libcxx_dir).pull()
if(libcxxabi):
print "Updating libc++ ABI"
git.cmd.Git(libcxxabi_dir).pull()
if(lld):
print "Updating lld"
git.cmd.Git(lld_dir).pull()
if(lldb):
print "Updating lldb"
git.cmd.Git(lldb_dir).pull()
if(testsuite):
print "Updating llvm test suite"
git.cmd.Git(testsuite_dir).pull()
|
from .base import GnuRecipe
class AlsaLibRecipe(GnuRecipe):
    """Build recipe for alsa-lib 1.1.6 (the ALSA userspace library)."""
    def __init__(self, *args, **kwargs):
        super(AlsaLibRecipe, self).__init__(*args, **kwargs)
        # Expected SHA-256 of the source tarball.
        self.sha256 = '5f2cd274b272cae0d0d111e8a9e363f0' \
            '8783329157e8dd68b3de0c096de6d724'
        self.name = 'alsa-lib'
        self.version = '1.1.6'
        # $version is substituted by the recipe framework when downloading.
        self.url = 'ftp://ftp.alsa-project.org/pub/lib/' \
            'alsa-lib-$version.tar.bz2'
        # Used to discover newer upstream versions on the mirror listing.
        self.version_regex = r'''alsa\-lib\-(?P<version>\d+\.\d+\.\d+)\.tar\.bz2'''
        self.version_url = 'http://www.mirrorservice.org/sites/' \
            'ftp.alsa-project.org/pub/lib/'
        # presumably python2 is required by alsa-lib's build tooling — TODO confirm
        self.depends = ['python2']
        self.environment_strip_lto()
|
from random import randint
from tkinter import *
from tkinter import ttk
class Node:
    """A grid cell holding links to its four neighbours and a search cost."""
    def __init__(self, x, y, aValue):
        self.x = x
        self.y = y
        # Neighbour links; 0 means "no neighbour assigned yet".
        self.leftNode = 0
        self.bottomNode = 0
        self.rightNode = 0
        self.topNode = 0
        # aValue: cost used to order nodes in the search queues — presumably
        # the A* g+h estimate; TODO confirm against AObject usage.
        self.aValue = aValue
class AObject:
    """A*-style search that walks the trainer toward a pokemon on ``tablero``.

    openQ    -- frontier nodes, kept sorted by ascending f = g + h (aValue)
    closeQ   -- already-expanded nodes, same ordering
    rightWay -- reconstructed path of expanded nodes
    steps    -- collects the path found for the first pokemon in ``pokemon``
    """

    def __init__(self, finder, start, pokemon, tablero):
        self.openQ = []
        self.closeQ = []
        self.rightWay = []
        self.steps = []

        def insertStep(node):
            # Add an expanded node to the path.  If it extends an earlier
            # path node rather than the last one, truncate the path back to
            # that ancestor first (the search can jump between branches).
            if not self.rightWay:
                print('primer paso')
            else:
                print('entre')
                # BUGFIX: the old debug prints dereferenced i.rightNode.x etc.
                # unconditionally and raised AttributeError whenever a link
                # still held the 0 sentinel (border cells); removed.
                for i in self.rightWay:
                    if i.rightNode != 0:
                        if (node.x == i.rightNode.x and node.y == i.rightNode.y):
                            self.rightWay = self.rightWay[0: self.rightWay.index(
                                i) + 1]
                            break
                    if i.leftNode != 0:
                        if (node.x == i.leftNode.x and node.y == i.leftNode.y):
                            self.rightWay = self.rightWay[0: self.rightWay.index(
                                i) + 1]
                            break
                    if i.topNode != 0:
                        if (node.x == i.topNode.x and node.y == i.topNode.y):
                            self.rightWay = self.rightWay[0: self.rightWay.index(
                                i) + 1]
                            break
                    if i.bottomNode != 0:
                        if (node.x == i.bottomNode.x and node.y == i.bottomNode.y):
                            self.rightWay = self.rightWay[0: self.rightWay.index(
                                i) + 1]
                            break
            # BUGFIX: the node was only appended on the very first call, so
            # the path never grew past one element; append unconditionally.
            self.rightWay.append(node)

        def insertClose(node):
            # Move `node` from the open queue (if present) into the closed
            # queue, keeping closeQ sorted by ascending aValue.
            if self.openQ:
                for i in self.openQ:
                    if node.x == i.x and node.y == i.y:
                        self.openQ.remove(i)
                        break
            if self.closeQ:
                for i in self.closeQ:
                    if node.aValue <= i.aValue:
                        self.closeQ.insert(self.closeQ.index(i), node)
                        break
                else:
                    # BUGFIX: this append previously ran *inside* the loop,
                    # which could insert the node twice; append exactly once,
                    # only when it is larger than every queued value.
                    self.closeQ.append(node)
            else:
                self.closeQ.append(node)

        def insertOpen(node):
            # Add `node` to the frontier unless its cell was already
            # expanded, keeping openQ sorted by ascending aValue.
            if self.closeQ:
                for i in self.closeQ:
                    if node.x == i.x and node.y == i.y:
                        return
            if self.openQ:
                for i in self.openQ:
                    if node.aValue <= i.aValue:
                        self.openQ.insert(self.openQ.index(i), node)
                        break
                else:
                    # BUGFIX: append once, after the loop (see insertClose).
                    self.openQ.append(node)
            else:
                self.openQ.append(node)

        def findWay(goal):
            # Expand frontier nodes until the current position reaches
            # `goal`, then return the reconstructed path.
            # NOTE(review): loops forever if the goal is unreachable.
            self.rightWay = []

            def wayWithoutObstacle(finder):
                # Which of the four neighbours are inside the 10x10 board
                # and not blocked by a Rock or the Van.
                obstacles = {}
                if finder.x > 0:
                    if (tablero[finder.y][finder.x - 1].name != 'Rock') and (tablero[finder.y][finder.x - 1].name != 'Van'):
                        obstacles['left'] = (True)
                    else:
                        obstacles['left'] = (False)
                else:
                    obstacles['left'] = (False)
                if finder.x < 9:
                    if (tablero[finder.y][finder.x + 1].name != 'Rock') and (tablero[finder.y][finder.x + 1].name != 'Van'):
                        obstacles['right'] = (True)
                    else:
                        obstacles['right'] = (False)
                else:
                    obstacles['right'] = (False)
                if finder.y > 0:
                    if (tablero[finder.y - 1][finder.x].name != 'Rock') and (tablero[finder.y - 1][finder.x].name != 'Van'):
                        obstacles['up'] = (True)
                    else:
                        obstacles['up'] = (False)
                else:
                    obstacles['up'] = (False)
                if finder.y < 9:
                    if (tablero[finder.y + 1][finder.x].name != 'Rock') and (tablero[finder.y + 1][finder.x].name != 'Van'):
                        obstacles['down'] = (True)
                    else:
                        obstacles['down'] = (False)
                else:
                    obstacles['down'] = (False)
                return obstacles

            def manhatan(startX, startY, goal):
                # Manhattan-distance heuristic.
                return abs(startX - goal.x) + abs(startY - goal.y)

            g_n_ = manhatan(finder.x, finder.y, start)
            h_n_ = manhatan(finder.x, finder.y, goal)
            currentTrainer = Trainer(finder.y, finder.x)
            while True:
                # BUGFIX: removed the debug `a = input()` pause here, which
                # blocked the whole Tk event loop on every search step.
                print('Pokemon', goal.x, goal.y)
                if self.openQ:
                    # Expand the best (lowest f-score) frontier node.
                    currentTrainer = Trainer(self.openQ[0].y, self.openQ[0].x)
                    g_n_ = manhatan(currentTrainer.x, currentTrainer.y, start)
                    h_n_ = manhatan(currentTrainer.x, currentTrainer.y, goal)
                    print('Pokebola', currentTrainer.x, currentTrainer.y)
                currentNode = Node(
                    currentTrainer.x, currentTrainer.y, g_n_ + h_n_)
                obstacles = wayWithoutObstacle(currentTrainer)
                print(obstacles)
                insertClose(currentNode)
                if obstacles['left']:
                    g_n_ = manhatan(currentTrainer.x - 1,
                                    currentTrainer.y, start)
                    h_n_ = manhatan(currentTrainer.x - 1,
                                    currentTrainer.y, goal)
                    insertOpen(Node(currentTrainer.x - 1,
                                    currentTrainer.y, g_n_ + h_n_))
                    currentNode.leftNode = Node(
                        currentTrainer.x - 1, currentTrainer.y, g_n_ + h_n_)
                if obstacles['right']:
                    g_n_ = manhatan(currentTrainer.x + 1,
                                    currentTrainer.y, start)
                    h_n_ = manhatan(currentTrainer.x + 1,
                                    currentTrainer.y, goal)
                    insertOpen(Node(currentTrainer.x + 1,
                                    currentTrainer.y, g_n_ + h_n_))
                    # BUGFIX: was built with x - 1 (copy-paste from the left
                    # branch); the right neighbour is at x + 1.
                    currentNode.rightNode = Node(
                        currentTrainer.x + 1, currentTrainer.y, g_n_ + h_n_)
                if obstacles['up']:
                    g_n_ = manhatan(currentTrainer.x,
                                    currentTrainer.y - 1, start)
                    h_n_ = manhatan(currentTrainer.x,
                                    currentTrainer.y - 1, goal)
                    insertOpen(
                        Node(currentTrainer.x, currentTrainer.y - 1, g_n_ + h_n_))
                    # BUGFIX: was (x - 1, y); the top neighbour is (x, y - 1).
                    currentNode.topNode = Node(
                        currentTrainer.x, currentTrainer.y - 1, g_n_ + h_n_)
                if obstacles['down']:
                    g_n_ = manhatan(currentTrainer.x,
                                    currentTrainer.y + 1, start)
                    h_n_ = manhatan(currentTrainer.x,
                                    currentTrainer.y + 1, goal)
                    insertOpen(
                        Node(currentTrainer.x, currentTrainer.y + 1, g_n_ + h_n_))
                    # BUGFIX: was (x - 1, y); the bottom neighbour is (x, y + 1).
                    currentNode.bottomNode = Node(
                        currentTrainer.x, currentTrainer.y + 1, g_n_ + h_n_)
                insertStep(currentNode)
                if currentTrainer.x == goal.x and currentTrainer.y == goal.y:
                    for k in self.rightWay:
                        print('Paso', '[', k.x, k.y, ']')
                    return self.rightWay

        self.steps.append(findWay(pokemon[0]))
class Pokemon:
    """A catchable pokemon cell; sprite loaded from images/<pokemonId>.png."""

    def __init__(self, i, j, pokemonId, container):
        self.name = 'Pokemon'
        self.pokemonId = pokemonId
        self.y, self.x = i, j
        self.image = PhotoImage(file='images/' + str(pokemonId) + '.png')
        self.label = Label(container, height='64', width='64',
                           borderwidth='2', image=self.image)
class Grass:
    """Walkable grass cell with its sprite label."""

    def __init__(self, i, j, container):
        self.name = 'Grass'
        self.y, self.x = i, j
        self.image = PhotoImage(file='images/grass.png')
        self.label = Label(container, height='64', width='64',
                           borderwidth='2', image=self.image)
class Rock:
    """Impassable rock cell with its sprite label."""

    def __init__(self, i, j, container):
        self.name = 'Rock'
        self.y, self.x = i, j
        self.image = PhotoImage(file='images/rock.png')
        self.label = Label(container, height='64', width='64',
                           borderwidth='2', image=self.image)
class Bean:
    """Jelly-bean marker cell with its sprite label."""

    def __init__(self, i, j, container):
        self.name = 'Bean'
        self.y, self.x = i, j
        self.image = PhotoImage(file='images/jelly-beans.png')
        self.label = Label(container, height='64', width='64',
                           borderwidth='2', image=self.image)
class Trainer:
    """The trainer / pokeball piece.

    When ``container`` is falsy the instance is a lightweight coordinate
    holder (as used by the path search) and no sprite widgets are created.
    """

    def __init__(self, i, j, container=False, pokeball=False):
        self.name = 'Trainer'
        self.back = False  # True while carrying a pokemon back to the van
        self.y, self.x = i, j
        if container:
            self.image = PhotoImage(file='images/' + pokeball + '.png')
            self.label = Label(container, height='64', width='64',
                               borderwidth='2', image=self.image)
class Van:
    """The trainer's van cell (drop-off point) with its sprite label."""

    def __init__(self, i, j, container):
        self.name = 'Van'
        self.y, self.x = i, j
        self.image = PhotoImage(file='images/van.png')
        self.label = Label(container, height='64', width='64',
                           borderwidth='2', image=self.image)
class Tablero:
    """10x10 game board: builds the Tk window, randomly populates cells and
    starts the find-pokemon timer loop (blocks in mainloop).

    NOTE(review): randint(0, self.size) can return 10, outside the 0-9 grid,
    and trainer.y - 1 can be -1 — both look like off-by-one bugs; confirm.
    """
    def __init__(self, size):
        self.window = Tk()
        self.window.title('Pokemon Finder')
        self.size = size
        self.tablero = []  # 2D grid of cell objects (Grass/Rock/Pokemon/Van)
        self.pokemonArray = []
        # Trainer spawns at a random cell; the van is placed one row above it.
        self.trainer = Trainer(randint(0, self.size), randint(
            0, self.size), self.window, 'pokeball2')
        for i in range(10):
            self.tablero.append([])
            for j in range(10):
                # `&` is a bitwise AND of two bools here — works, though
                # `and` would be idiomatic.
                if ((j == self.trainer.x) & (i == self.trainer.y - 1)):
                    self.van = Van(i, j, self.window)
                    self.tablero[i].append(self.van)
                elif randint(0, 6) == 1:
                    # ~1/7 chance of a pokemon with a random sprite id.
                    pokemon = Pokemon(i, j, randint(1, 19), self.window)
                    self.pokemonArray.append(pokemon)
                    self.tablero[i].append(pokemon)
                elif randint(0, 6) == 1:
                    rock = Rock(i, j, self.window)
                    self.tablero[i].append(rock)
                else:
                    grass = Grass(i, j, self.window)
                    self.tablero[i].append(grass)
        # Lay every cell's label out on the Tk grid.
        for i in range(10):
            for j in range(10):
                self.tablero[i][j].label.grid(
                    column=self.tablero[i][j].x, row=self.tablero[i][j].y)
        self.window.after(500, self.findPokemon)
        self.window.mainloop()  # blocks until the window is closed
    def findPokemon(self):
        """Tk timer callback: run one AI step, then re-schedule in 500 ms."""
        def Move(trainer):
            # Bundle of movement/decision helpers closed over `trainer`.
            def rightMove(leaveBean=False):
                # Shift the trainer one cell right; leaveBean selects the
                # 'pokeball1' sprite (returning with a pokemon).
                if leaveBean:
                    # self.tablero[trainer.y][trainer.x] = Bean(trainer.y, trainer.y, self.window)
                    self.tablero[trainer.y][trainer.x + 1] = Trainer(
                        trainer.y, trainer.x + 1, self.window, 'pokeball1')
                else:
                    self.tablero[trainer.y][trainer.x + 1] = Trainer(
                        trainer.y, trainer.x + 1, self.window, 'pokeball2')
                self.tablero[trainer.y][trainer.x] = Grass(
                    trainer.y, trainer.x, self.window)
                self.tablero[trainer.y][trainer.x].label.grid(
                    column=trainer.x, row=trainer.y)
                self.tablero[trainer.y][trainer.x +
                                        1].label.grid(column=trainer.x + 1, row=trainer.y)
                trainer.x += 1
            def leftMove(leaveBean=False):
                # Mirror of rightMove, one cell to the left.
                if leaveBean:
                    # self.tablero[trainer.y][trainer.x] = Bean(trainer.y, trainer.y, self.window)
                    self.tablero[trainer.y][trainer.x - 1] = Trainer(
                        trainer.y, trainer.x - 1, self.window, 'pokeball1')
                else:
                    self.tablero[trainer.y][trainer.x - 1] = Trainer(
                        trainer.y, trainer.x - 1, self.window, 'pokeball2')
                self.tablero[trainer.y][trainer.x] = Grass(
                    trainer.y, trainer.x, self.window)
                self.tablero[trainer.y][trainer.x].label.grid(
                    column=trainer.x, row=trainer.y)
                self.tablero[trainer.y][trainer.x -
                                        1].label.grid(column=trainer.x - 1, row=trainer.y)
                trainer.x -= 1
            def downMove(leaveBean=False):
                # One cell down (y grows downward on the grid).
                if leaveBean:
                    # self.tablero[trainer.y][trainer.x] = Bean(trainer.y, trainer.y, self.window)
                    self.tablero[trainer.y + 1][trainer.x] = Trainer(
                        trainer.y + 1, trainer.x, self.window, 'pokeball1')
                else:
                    self.tablero[trainer.y + 1][trainer.x] = Trainer(
                        trainer.y + 1, trainer.x, self.window, 'pokeball2')
                self.tablero[trainer.y][trainer.x] = Grass(
                    trainer.y, trainer.x, self.window)
                self.tablero[trainer.y][trainer.x].label.grid(
                    column=trainer.x, row=trainer.y)
                self.tablero[trainer.y +
                             1][trainer.x].label.grid(column=trainer.x, row=trainer.y + 1)
                trainer.y += 1
            def upMove(leaveBean=False):
                # One cell up.
                if leaveBean:
                    # self.tablero[trainer.y][trainer.x] = Bean(trainer.y, trainer.y, self.window)
                    self.tablero[trainer.y - 1][trainer.x] = Trainer(
                        trainer.y - 1, trainer.x, self.window, 'pokeball1')
                else:
                    self.tablero[trainer.y - 1][trainer.x] = Trainer(
                        trainer.y - 1, trainer.x, self.window, 'pokeball2')
                self.tablero[trainer.y][trainer.x] = Grass(
                    trainer.y, trainer.x, self.window)
                self.tablero[trainer.y][trainer.x].label.grid(
                    column=trainer.x, row=trainer.y)
                self.tablero[trainer.y -
                             1][trainer.x].label.grid(column=trainer.x, row=trainer.y - 1)
                trainer.y -= 1
            def isPokemonClose():
                # Direction of an adjacent pokemon, or None (implicit).
                if trainer.x < self.size - 1 and self.tablero[trainer.y][trainer.x+1].name == 'Pokemon':
                    return 'right'
                elif trainer.x > 0 and self.tablero[trainer.y][trainer.x-1].name == 'Pokemon':
                    return 'left'
                elif trainer.y < self.size - 1 and self.tablero[trainer.y + 1][trainer.x].name == 'Pokemon':
                    return 'down'
                elif trainer.y > 0 and self.tablero[trainer.y - 1][trainer.x].name == 'Pokemon':
                    return 'up'
            def wayWithoutObstacle():
                # Map each direction to True when the neighbouring cell is
                # on the board and not a Rock or the Van.
                obstacles = {}
                if trainer.x > 0:
                    if (self.tablero[trainer.y][trainer.x - 1].name != 'Rock') and (self.tablero[trainer.y][trainer.x - 1].name != 'Van'):
                        obstacles['left'] = (True)
                    else:
                        obstacles['left'] = (False)
                else:
                    obstacles['left'] = (False)
                if trainer.x < self.size - 1:
                    if (self.tablero[trainer.y][trainer.x + 1].name != 'Rock') and (self.tablero[trainer.y][trainer.x + 1].name != 'Van'):
                        obstacles['right'] = (True)
                    else:
                        obstacles['right'] = (False)
                else:
                    obstacles['right'] = (False)
                if trainer.y > 0:
                    if (self.tablero[trainer.y - 1][trainer.x].name != 'Rock') and (self.tablero[trainer.y - 1][trainer.x].name != 'Van'):
                        obstacles['up'] = (True)
                    else:
                        obstacles['up'] = (False)
                else:
                    obstacles['up'] = (False)
                if trainer.y < self.size - 1:
                    if (self.tablero[trainer.y + 1][trainer.x].name != 'Rock') and (self.tablero[trainer.y + 1][trainer.x].name != 'Van'):
                        obstacles['down'] = (True)
                    else:
                        obstacles['down'] = (False)
                else:
                    obstacles['down'] = (False)
                return obstacles
            def chooseWay(obstacles):
                # Pick a random unblocked direction.
                # NOTE(review): recurses forever if all four are blocked.
                choose = randint(0, 3)
                if choose == 0 and obstacles['left']:
                    return 'left'
                elif choose == 1 and obstacles['right']:
                    return 'right'
                elif choose == 2 and obstacles['up']:
                    return 'up'
                elif choose == 3 and obstacles['down']:
                    return 'down'
                else:
                    return chooseWay(obstacles)
            def backToVan():
                # Walk the trainer back toward the van after a catch.
                def chooseBackWay():
                    # Greedy step that reduces Manhattan distance to the van.
                    # NOTE(review): the final else falls through (bare
                    # `None`), so the function may return None — confirm
                    # the callers rely on the except handler for that.
                    min = abs(trainer.x + 1 - self.van.x) + \
                        abs(trainer.y - self.van.y)
                    if (abs(trainer.x - 1 - self.van.x) + abs(trainer.y - self.van.y) < min) and wayWithoutObstacle()['left'] and isPokemonClose() != 'left':
                        return 'left'
                    elif (abs(trainer.x - self.van.x) + abs(trainer.y + 1 - self.van.y) < min) and wayWithoutObstacle()['down'] and isPokemonClose() != 'down':
                        return 'down'
                    elif (abs(trainer.x - self.van.x) + abs(trainer.y - 1 - self.van.y) < min) and wayWithoutObstacle()['up'] and isPokemonClose() != 'up':
                        return 'up'
                    elif wayWithoutObstacle()['right'] and isPokemonClose() != 'right':
                        return 'right'
                    else:
                        None
                def isVanClose():
                    # True when the van occupies one of the four neighbours.
                    if self.trainer.x < self.size - 1:
                        if self.tablero[trainer.y][trainer.x+1].name == 'Van':
                            return True
                    if self.trainer.x > 0:
                        if self.tablero[trainer.y][trainer.x-1].name == 'Van':
                            return True
                    if self.trainer.y < self.size - 1:
                        if self.tablero[trainer.y+1][trainer.x].name == 'Van':
                            return True
                    if self.trainer.y > 0:
                        if self.tablero[trainer.y-1][trainer.x].name == 'Van':
                            return True
                    else:
                        return False
                pokemonGotcha(True)
                try:
                    if isVanClose():
                        pokemonGotcha(False)
                    elif chooseBackWay() == 'right':
                        rightMove(True)
                    elif chooseBackWay() == 'left':
                        leftMove(True)
                    elif chooseBackWay() == 'down':
                        downMove(True)
                    elif chooseBackWay() == 'up':
                        upMove(True)
                except Exception as error:
                    print(error)
            def pokemonGotcha(gotIt):
                # Toggle the "carrying a pokemon" state and swap the sprite.
                self.trainer.back = gotIt
                self.trainer.image = PhotoImage(file='images/pokeball1.png')
                self.trainer.label.config(image=self.trainer.image)
            # Run the A* search from the trainer toward the pokemons.
            self.a = AObject(self.trainer, self.van,
                             self.pokemonArray, self.tablero)
            # print(self.a.openQ, self.a.closeQ)
        Move(self.trainer)
        self.window.after(500, self.findPokemon)
def main():
    """Build a 10x10 board and run the game (enters the Tk main loop)."""
    # Convention in this module: x corresponds to column j, y to row i.
    board = Tablero(10)


if __name__ == '__main__':
    main()
|
import shutil
from pathlib import Path
import cv2
from utils import get_all_files_in_folder
from numpy import loadtxt
from tqdm import tqdm
def convert_bboxes_format(input_txt_dir, output_txt_dir, input_format, output_format, one_class=None,
                          img_ext='png',
                          txt_ext=None,
                          delimiter_source_txt=' '):
    """Convert bbox annotation txt files between formats, copying the images.

    Supported: yolo -> 'cxywh' (class, x_left, y_top, w, h absolute),
    yolo -> 'cx1y1x2y2' (class, x1, y1, x2, y2 absolute), and
    'cx1y1x2y2' -> yolo (relative center/size).  Each <stem>.<img_ext> image
    next to a txt is read to get the pixel size; output_txt_dir is wiped
    and re-created, then receives the converted txts plus image copies.

    one_class -- when truthy, every box gets this label instead of item[0].
    """
    if txt_ext is None:
        txt_ext = ['*.txt']
    # clear folder
    dirpath = Path(output_txt_dir)
    if dirpath.exists() and dirpath.is_dir():
        shutil.rmtree(dirpath)
    Path(dirpath).mkdir(parents=True, exist_ok=True)
    txt_list = get_all_files_in_folder(input_txt_dir, txt_ext)
    for ind, txt in tqdm(enumerate(txt_list), total=len(txt_list)):
        # Image size drives the relative <-> absolute coordinate conversion.
        image = cv2.imread(str(input_txt_dir) + '/' + txt.stem + '.' + img_ext, cv2.IMREAD_UNCHANGED)
        h, w = image.shape[:2]
        if input_format == 'yolo':
            if output_format == 'cxywh':
                filename = txt.stem
                lines = loadtxt(str(Path(input_txt_dir).joinpath(txt.name)), delimiter=delimiter_source_txt,
                                unpack=False)
                # loadtxt squeezes a one-row file to 1-D; re-wrap it so the
                # per-box loop below still works.
                if lines.shape.__len__() == 1:
                    lines = [lines]
                with open(Path(output_txt_dir).joinpath(txt.name), 'w') as f:
                    for item in lines:
                        width = int(item[3] * w)
                        height = abs(int(item[4] * h))
                        # Clamp to the image bounds.
                        if width > w: width = w
                        if height > h: height = h
                        x = abs(int(item[1] * w - width / 2))
                        y = abs(int(item[2] * h - height / 2))
                        if one_class:
                            label = one_class
                        else:
                            # Keep integer class ids as ints, else as-is.
                            if item[0].is_integer():
                                label = int(item[0])
                            else:
                                label = item[0]
                        rec = str(label) + ' ' + str(x) + ' ' + str(y) + ' ' + str(width) + ' ' + str(height)
                        f.write("%s\n" % rec)
            elif output_format == 'cx1y1x2y2':
                filename = txt.stem
                lines = loadtxt(str(Path(input_txt_dir).joinpath(txt.name)), delimiter=delimiter_source_txt,
                                unpack=False)
                if lines.shape.__len__() == 1:
                    lines = [lines]
                with open(Path(output_txt_dir).joinpath(txt.name), 'w') as f:
                    # NOTE(review): `lines[0] != 0` is an elementwise numpy
                    # comparison, so this is len(<bool array>) — truthy for
                    # any non-empty row; probably meant len(lines) != 0.
                    # Confirm before changing.
                    if len(lines[0] != 0):
                        for item in lines:
                            width = int(item[3] * w)
                            height = abs(int(item[4] * h))
                            if width > w: width = w
                            if height > h: height = h
                            # Corners from center +/- half size.
                            x1 = abs(int(item[1] * w - width / 2))
                            y1 = abs(int(item[2] * h - height / 2))
                            x2 = abs(int(item[1] * w + width / 2))
                            y2 = abs(int(item[2] * h + height / 2))
                            if one_class:
                                label = one_class
                            else:
                                if item[0].is_integer():
                                    label = int(item[0])
                                else:
                                    label = item[0]
                            rec = str(label) + ' ' + str(x1) + ' ' + str(y1) + ' ' + str(x2) + ' ' + str(y2)
                            f.write("%s\n" % rec)
        elif input_format == 'cx1y1x2y2':
            if output_format == 'yolo':
                filename = txt.stem
                lines = loadtxt(str(Path(input_txt_dir).joinpath(txt.name)), delimiter=delimiter_source_txt,
                                unpack=False)
                if lines.shape.__len__() == 1:
                    lines = [lines]
                with open(Path(output_txt_dir).joinpath(txt.name), 'w') as f:
                    # NOTE(review): same elementwise-comparison oddity as above.
                    if len(lines[0] != 0):
                        for item in lines:
                            # Relative size and center from absolute corners.
                            width = (int(item[3]) - int(item[1])) / w
                            height = (int(item[4]) - int(item[2])) / h
                            if width > 1: width = 1
                            if height > 1: height = 1
                            xcenter = ((int(item[3]) - int(item[1])) / 2 + int(item[1])) / w
                            ycenter = ((int(item[4]) - int(item[2])) / 2 + int(item[2])) / h
                            if one_class:
                                label = one_class
                            else:
                                if item[0].is_integer():
                                    label = int(item[0])
                                else:
                                    label = item[0]
                            rec = str(label) + ' ' + str(xcenter) + ' ' + str(ycenter) + ' ' + str(width) + ' ' + str(height)
                            f.write("%s\n" % rec)
        # Copy the image alongside the converted annotations.
        cv2.imwrite(str(output_txt_dir) + '/' + txt.stem + '.' + img_ext, image)
if __name__ == '__main__':
    # Example run: convert yolo (relative class/cx/cy/w/h) annotations to
    # absolute class/x1/y1/x2/y2 boxes.
    input_txt_dir = Path('data/convert_bboxes_format/input_txt')
    output_txt_dir = Path('data/convert_bboxes_format/output_txt')
    input_format = 'yolo'  # class, x_center, y_center, width, height, relative values
    # output_format = 'cxywh' # class, x_left, y_top, width, height, absolute values
    output_format = 'cx1y1x2y2'  # class, x_left, y_top, x_right, y_bottom, absolute values
    # image_size_wh = [1920, 1080]
    img_ext = 'jpg'
    txt_ext = ['*.txt']
    convert_bboxes_format(input_txt_dir=input_txt_dir, output_txt_dir=output_txt_dir, input_format=input_format,
                          output_format=output_format, img_ext=img_ext, txt_ext=txt_ext)
|
from rest_framework import status, permissions
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_simplejwt.exceptions import InvalidToken, TokenError
from rest_framework_simplejwt.views import TokenViewBase
from django.conf import settings
from .serializers import AppUserSerializer, AppTokenObtainPairSerializer
class InvalidUser(AuthenticationFailed):
    """Authentication failure reported with a stable machine-readable code.

    NOTE(review): 406 (rather than 401) is presumably used so client-side
    interceptors that auto-react to 401 don't fire on a bad login — confirm.
    """
    status_code = status.HTTP_406_NOT_ACCEPTABLE
    default_detail = ('User credentials are invalid or expired.')
    default_code = 'user_credentials_not_valid'
class AppUserCreate(APIView):
    """Open registration endpoint: POST user fields, get the created user.

    Returns 201 with the serialized user on success, 400 with field errors
    otherwise.  No authentication required.
    """
    authentication_classes = []
    permission_classes = (permissions.AllowAny,)

    def post(self, request, format='json'):
        serializer = AppUserSerializer(data=request.data)
        if serializer.is_valid():
            user = serializer.save()
            if user:
                # Renamed from `json`, which shadowed the stdlib module name.
                data = serializer.data
                # BUGFIX: previously only the error response carried the CORS
                # headers, so cross-origin clients failed on the success path;
                # send them consistently on both.
                return Response(
                    data,
                    headers=settings.ACCESS_CONTROL_RESPONSE_HEADERS,
                    status=status.HTTP_201_CREATED
                )
        return Response(
            serializer.errors,
            headers=settings.ACCESS_CONTROL_RESPONSE_HEADERS,
            status=status.HTTP_400_BAD_REQUEST
        )
class ObtainRefreshToken(TokenViewBase):
    """Token-pair endpoint mapping auth failures onto project exceptions."""

    authentication_classes = []
    permission_classes = (permissions.AllowAny,)
    serializer_class = AppTokenObtainPairSerializer

    def post(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        try:
            serializer.is_valid(raise_exception=True)
        except AuthenticationFailed as exc:
            # Bad credentials -> 406 (InvalidUser) instead of a plain 401.
            raise InvalidUser(exc.args[0])
        except TokenError as exc:
            raise InvalidToken(exc.args[0])
        payload = serializer.validated_data
        return Response(
            payload,
            headers=settings.ACCESS_CONTROL_RESPONSE_HEADERS,
            status=status.HTTP_200_OK
        )
#!/usr/bin/env python3
"""Shakespearean insult generator"""
import sys
import random
# Word pools for the insult template "You <adj>, <adj>, <adj> <noun>!".
adjectives = """
scurvy old filthy scurilous lascivious foolish rascaly gross rotten corrupt
foul loathsome irksome heedless unmannered whoreson cullionly false filthsome
toad-spotted caterwauling wall-eyed insatiate vile peevish infected
sodden-witted lecherous ruinous indistinguishable dishonest thin-faced
slanderous bankrupt base detestable rotten dishonest lubbery
""".strip().split()
nouns = """
knave coward liar swine villain beggar slave scold jolthead whore barbermonger
fishmonger carbuncle fiend traitor block ape braggart jack milksop boy harpy
recreant degenerate Judas butt cur Satan ass coxcomb dandy gull minion
ratcatcher maw fool rogue lunatic varlet worm
""".strip().split()
# Optional first CLI argument: how many insults to print (default 5).
args = sys.argv[1:]
if args and args[0].isdigit():
    num = int(args[0])
else:
    num = 5
for _ in range(num):
    # Three adjectives, then a noun — same random-call order as before.
    adjs = [random.choice(adjectives) for _ in range(3)]
    insult = ', '.join(adjs)
    print('You {} {}!'.format(insult, random.choice(nouns)))
|
"""
Parameter Estimation tests #4.
Variant of #3 but with sodium conductance pars being optimized.
Requires Dopri integrator because more computational power is needed!
Robert Clewley, March 2005.
"""
from __future__ import print_function
# PyDSTool imports
from PyDSTool import *
from PyDSTool.Toolbox.ParamEst import LMpest
from PyDSTool.Toolbox.neuro_data import *
import HH_model
# BUGFIX: time.clock() was removed in Python 3.8; alias perf_counter under
# the old name so the timing code below keeps working unchanged.
from time import perf_counter as clock
# print "This test runs much more efficiently using the Dopri integrator"
gentype = 'dopri'
genlang = 'c'
# ----------------------------------------------------------------
# Reference ("goal") HH neuron whose trajectory plays the role of data.
tdata = [0, 20]
par_args_HH_goal = {'gna': 100, 'gk': 80, 'gl': 0.1,
                    'vna': 50, 'vk': -100, 'vl': -67,
                    'Iapp': 1.3, 'C': 1.0}
ic_args_HH = {'v':-70, 'm': 0, 'h': 1, 'n': 0}
# The extra sinusoidal term injects pseudo-noise so the goal trajectory
# resembles real recorded data.
HH_goal = HH_model.makeHHneuron('goalHH', par_args_HH_goal, ic_args_HH,
                extra_terms='-0.03*(sin(9.1*t)*cos(2.6*t)+sin(5.1119*t+2))*(v-60)')
HH_goal.set(tdata=tdata, algparams={'init_step':0.1})
goaltraj = HH_goal.compute('goalHHtraj')
# Zero-crossing event on v used to extract the "true" spike time from the
# goal trajectory (for reporting only — the fit finds the spike itself).
HH_event_args = args(name='HH_zerothresh',
                     eventtol=1e-3,
                     eventdelay=1e-3,
                     starttime=0,
                     active=True,
                     precise=True)
HH_thresh_ev = Events.makePythonStateZeroCrossEvent('v', 0, 1,
                            HH_event_args, goaltraj.variables['v'])
result = HH_thresh_ev.searchForEvents(tuple(tdata))
HH_spike_t = result[0][0]
print("True HH spike time based on threshold event is at ", HH_spike_t)
print("but assume the traj is real data so that we have to find the spike")
print("directly from the noisy data")
## Set up external interface for the reference trajectory based on spike time
# Comparison mesh: 100 interior sample points over tdata.
tmesh = goaltraj.indepdomain.sample(dt=(tdata[1]-tdata[0])/100.,
                                    avoidendpoints=True)
## DATA SPIKE ===================
# quantitative feature
sp_feat = spike_feature('spike_feat', pars=args(tol=0.6))
spike_condition = condition({sp_feat: True})
# one interface for judging the spike (uses a qual feature to process the ref
# trajectory)
is_spike = get_spike_data('is_spike', pars=args(height_tol=2.,
                                                fit_width_max=1.,
                                                weight=0,
                                                width_tol=15,
                                                noise_tol=0.5,
                                                thresh_pc=0.15,
                                                eventtol=1e-4,
                                                coord='v',tlo=tdata[0],
                                                thi=tdata[1]))
# Sanity check: the goal trajectory must actually contain a detectable spike.
assert is_spike(goaltraj)
class ext_spike_iface(extModelInterface):
    """External interface reducing a trajectory to its spike (time, height)."""

    def postprocess_test_traj(self, traj):
        # Run the qualitative spike detector, then repackage its result
        # as a one-point trajectory of (spike time, spike height).
        assert is_spike(traj)
        t_spike = is_spike.results.spike_time
        v_spike = is_spike.results.spike_val
        return numeric_to_traj([[t_spike], [v_spike]], self._trajname,
                               ['sptime', 'spval'], indepvar=[0])
# External interface bound to the goal trajectory; only models exposing an
# int_spike_iface may be compared against it.
spike_interface = ext_spike_iface(goaltraj,
                                  conditions=spike_condition,
                                  compatibleInterfaces=['int_spike_iface'])
## DATA GEOM ===================
# Quantitative feature comparing the V trajectory shape on tmesh.
geom_feat = geom_feature('geom_feat', pars=args(tol=10,
                                                tmesh=tmesh,
                                                depvar='v'))
geom_condition = condition({geom_feat: True})
# one interface for judging the shape of the V trajectory
class ext_geom_iface(extModelInterface):
    """External interface for the V-trajectory shape; inherits all behaviour."""
geom_interface = ext_geom_iface(goaltraj,
                                conditions=geom_condition,
                                compatibleInterfaces=['int_geom_iface'])
## ----------------------------------------------------------------------
## Set up test HH model
# Deliberately mis-set parameters; only gna and gl are optimized below.
par_args_HH_test = {'gna': 95, 'gk': 82, 'gl': 0.12,
                    'vna': 48, 'vk': -95, 'vl': -67.5,
                    'Iapp': 1.32, 'C': 1.0}
# Note that these params are not the same as that for goal, even though we're not
# optimizing them
DS_event_args = args(name='threshold',
                     eventtol=5e-3,
                     eventdelay=1e-3,
                     starttime=0,
                     active=True,
                     term=False,
                     precise=True)
thresh_ev = Events.makeZeroCrossEvent('v', 1, DS_event_args, varnames=['v'],
                                      targetlang=genlang)
HH_test = HH_model.makeHHneuron('testHH2', par_args_HH_test, ic_args_HH,
                                thresh_ev, gentype=gentype)
if genlang == 'python':
    # need to force more accuracy b/c less efficient integrator
    init_step = 1e-3
else:
    init_step = 1e-2
HH_test.set(tdata=tdata, algparams={'atol':1e-9,'rtol':1e-8, 'init_step': init_step,
                                    'min_step':1e-5})
# Make model out of HH DS
HH_test_model = embed(HH_test, ic_args_HH)
HH_test_model.compute(trajname='orig')
class int_spike_iface(intModelInterface):
    """Internal interface reducing a model trajectory to its threshold event."""

    def postprocess_test_traj(self, traj):
        evpts = traj.getEvents('threshold')
        # Penalize "broken" output — no event data, or not exactly one
        # threshold crossing — with a large dummy time/value.
        if evpts is None or len(evpts) != 1:
            ev_t = [300]
            ev_v = [300]
        else:
            ev_t = evpts['t']
            ev_v = evpts['v']
        return numeric_to_traj([ev_t, ev_v], self._trajname,
                               ['sptime', 'spval'],
                               indepvar=[0])
class int_geom_iface(intModelInterface):
    """Internal interface resampling the model's V trajectory onto tmesh."""

    def postprocess_test_traj(self, traj):
        # Resample on the data mesh — the model traj may have used a
        # different mesh of its own.
        v_values = traj(tmesh)['v']
        return numeric_to_traj([v_values], self._trajname, ['v'],
                               indepvar=tmesh)
# Pair each external (data) interface with its internal (model) counterpart.
pest_context = context([ (spike_interface, int_spike_iface),
                         (geom_interface, int_geom_iface) ])
## Parameter estimation
# NOTE(review): the message says "vl" but freeParams below optimizes gl —
# the text looks like a typo; confirm before changing the string.
print('Estimating pars gna and vl for fit to non-identical HH cell')
print('Goal values are gna =', par_args_HH_goal['gna'], ', gl =', \
      par_args_HH_goal['gl'], ' ...')
pest_pars = LMpest(freeParams=['gna', 'gl'],
                   testModel=HH_test_model,
                   context=pest_context,
                   verbose_level=2,
                   usePsyco=False
                  )
# In case finite difference stepsize needs adjusting
pest_pars.fn.eps=1e-5
# Phase 1 weighting: spike timing dominates the geometry feature.
pest_context.set_weights({spike_interface: {sp_feat: 10},
                          geom_interface: {geom_feat: 0.2}})
t0=clock()
# Phase 1 run: fit with spike-dominated weights.
pestData_par_phase1 = pest_pars.run(parDict={'ftol':1e-5,
                                             'xtol':1e-6
                                             },
                                    verbose=True)
HH_test_model.set(pars=pestData_par_phase1['pars_sol'])
# Phase 2: rebalance toward trajectory geometry and refine from phase 1.
pest_context.set_weights({spike_interface: {sp_feat: 5},
                          geom_interface: {geom_feat: 0.7}})
pestData_par = pest_pars.run(parDict={'ftol':1e-5,
                                      'xtol':1e-6
                                      },
                             verbose=True)
print('... finished in %.4f seconds\n'%(clock()-t0))
## Finish preparing plots
# Overlay initial, goal and estimated V trajectories on one figure.
print('\nPreparing plots')
figure()
disp_dt = 0.05
plotData_orig = HH_test_model.sample('orig', ['v'], disp_dt, precise=True)
origleg = "v initial"
plotData_goal = goaltraj.sample(['v'], disp_dt)
goalleg = "v goal"
plotData_par = HH_test_model.sample('test_iface_traj', ['v'], disp_dt)
plt.ylabel('v')
plt.xlabel('t')
goalline = plt.plot(plotData_goal['t'], plotData_goal['v'])
origline = plt.plot(plotData_orig['t'], plotData_orig['v'])
estline = plt.plot(plotData_par['t'], plotData_par['v'])
estleg = 'v estimated'
# NOTE(review): plt.plot returns a *list* of Line2D objects and the location
# is passed positionally — this relies on old matplotlib legend behaviour;
# confirm against the matplotlib version in use.
plt.legend([origline, goalline, estline],
           [origleg, goalleg, estleg],
           'lower left')
show()
|
import re
from datetime import datetime, timedelta, timezone
from time import sleep
from pytest_cloudflare_worker import TestClient
from pytest_toolbox.comparison import AnyInt, CloseToNow, RegexStr
def test_create_get(client: TestClient):
    """Happy path: create a site, upload two files, read them back, and
    verify the worker's raw KV storage contents."""
    # Start from a clean KV store (test-only endpoint).
    r = client.delete('/testing/storage/')
    assert r.status_code == 200, r.text
    r = client.post('/create/', headers={'authorisation': 'YWJjZAy'})
    assert r.status_code == 200, r.text
    obj = r.json()
    # debug(obj)
    assert obj['message'] == 'New site created successfully'
    assert obj['url'].startswith('https://example.com/')
    assert obj['site_creation'] == RegexStr(r'20\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?Z')
    # Trailing 'Z' stripped because fromisoformat (pre-3.11) can't parse it.
    site_creation = datetime.fromisoformat(obj['site_creation'][:-1]).replace(tzinfo=timezone.utc)
    assert site_creation == CloseToNow()
    site_expiration = datetime.fromisoformat(obj['site_expiration'][:-1]).replace(tzinfo=timezone.utc)
    assert site_expiration - site_creation == timedelta(seconds=90)
    # The public key is the path component of the returned URL.
    pk = re.sub(r'^https://example\.com/', '', obj['url']).strip('/')
    # Upload the site root page.
    r = client.post(
        f'/{pk}/',
        data='<h1>this is a test</h1>',
        headers={'authorisation': obj['secret_key'], 'content-type': 'text/html'},
    )
    assert r.status_code == 200, r.text
    assert r.json() == {'path': '/', 'content_type': 'text/html', 'size': 23, 'total_site_size': 23}
    r = client.get(f'/{pk}/')
    assert r.status_code == 200, r.text
    assert r.text == '<h1>this is a test</h1>'
    assert r.headers['content-type'] == 'text/html'
    # The worker auto-generates a JSON manifest for the site.
    r = client.get(f'/{pk}/.smokeshow.json')
    assert r.status_code == 200, r.text
    assert r.json() == {
        'url': f'https://example.com/{pk}/',
        'site_creation': CloseToNow(delta=10),
        'site_expiration': RegexStr(r'20\d\d-\d\d-\d\dT.+'),
        'files': ['/'],
        'total_site_size': 175,
    }
    assert r.headers['content-type'] == 'application/json'
    # Content types are stored verbatim, even non-standard ones.
    r = client.post(
        f'/{pk}/foobar.html',
        data='<h1>this is my page</h1>',
        headers={'authorisation': obj['secret_key'], 'content-type': 'foo/bar'},
    )
    assert r.status_code == 200, r.text
    assert r.json() == {'path': '/foobar.html', 'content_type': 'foo/bar', 'size': 24, 'total_site_size': 47}
    # '/foobar/' resolves to the uploaded 'foobar.html'.
    r = client.get(f'/{pk}/foobar/')
    assert r.status_code == 200, r.text
    assert r.text == '<h1>this is my page</h1>'
    assert r.headers['content-type'] == 'foo/bar'
    expiration = int(round(site_expiration.replace(tzinfo=timezone.utc).timestamp()))
    # Inspect the raw KV entries for this site (test-only endpoint).
    r = client.get('/testing/storage/', params={'prefix': f'site:{pk}'})
    assert r.status_code == 200, r.text
    # debug(r.json())
    # debug(client.inspect_log_wait(wait_time=3))
    assert r.json() == {
        f'site:{pk}:/': {
            'value': '1',
            'metadata': {
                'size': 23,
                'content_type': 'text/html',
                'hash': 'H4OFKgUZxqnmcsGSJH4+soIyellWXc1Kq5t/fzbuHhQ=',
            },
            'expiration': expiration,
        },
        f'site:{pk}:/.smokeshow.json': {
            'value': RegexStr('{.+}'),
            'metadata': {'content_type': 'application/json', 'size': AnyInt()},
            'expiration': expiration,
        },
        f'site:{pk}:/foobar.html': {
            'value': '1',
            'metadata': {'size': 24, 'content_type': 'foo/bar', 'hash': 'jqfqkZwCywQ/gc9JZlkCIj3pbO7fBy9TTpSVYDCWfio='},
            'expiration': expiration,
        },
    }
    # File bodies are stored once, under content-hash keys.
    r = client.get('/testing/storage/', params={'prefix': 'file:'})
    assert r.status_code == 200, r.text
    obj = r.json()
    assert obj['file:H4OFKgUZxqnmcsGSJH4+soIyellWXc1Kq5t/fzbuHhQ='] == {
        'value': '<h1>this is a test</h1>',
        'metadata': {'path': '/', 'public_key': pk},
        'expiration': expiration,
    }
    assert obj['file:jqfqkZwCywQ/gc9JZlkCIj3pbO7fBy9TTpSVYDCWfio='] == {
        'value': '<h1>this is my page</h1>',
        'metadata': {'path': '/foobar.html', 'public_key': pk},
        'expiration': expiration,
    }
def test_404(client: TestClient):
    """A missing file yields a plain-text 404 until the site uploads its own
    404.html, which is then served (still with status 404)."""
    r = client.delete('/testing/storage/')
    assert r.status_code == 200, r.text
    r = client.post('/create/', headers={'authorisation': 'YWJjZAy'})
    assert r.status_code == 200, r.text
    obj = r.json()
    # debug(obj)
    assert obj['message'] == 'New site created successfully'
    assert obj['url'].startswith('https://example.com/')
    pk = re.sub(r'^https://example\.com/', '', obj['url']).strip('/')
    # Default 404 before a custom page exists.
    r = client.get(f'/{pk}/missing.html')
    assert r.status_code == 404, r.text
    assert r.text == f'404: File "/missing.html" not found in site "{pk}"'
    assert r.headers['content-type'].startswith('text/plain')
    # Upload a custom 404 page.
    r = client.post(
        f'/{pk}/404.html',
        data='<h1>Page not found :-(</h1>',
        headers={'authorisation': obj['secret_key'], 'content-type': 'text/html'},
    )
    assert r.status_code == 200, r.text
    assert r.json() == {'path': '/404.html', 'content_type': 'text/html', 'size': 27, 'total_site_size': 27}
    # The same request now serves the custom page as the 404 body.
    r = client.get(f'/{pk}/missing.html')
    assert r.status_code == 404, r.text
    assert r.text == '<h1>Page not found :-(</h1>'
    assert r.headers['content-type'] == 'text/html'
def test_duplicate_file(client: TestClient):
    """Two sites uploading identical content share one content-hash file
    entry; the shared entry carries the later (larger) expiration."""
    r = client.delete('/testing/storage/')
    assert r.status_code == 200, r.text
    # First site.
    r = client.post('/create/', headers={'authorisation': 'YWJjZAy'})
    assert r.status_code == 200, r.text
    obj = r.json()
    pk1 = re.sub(r'^https://example\.com/', '', obj['url']).strip('/')
    key1 = obj['secret_key']
    site_expiration = datetime.fromisoformat(obj['site_expiration'][:-1]).replace(tzinfo=timezone.utc)
    expiration1 = int(round(site_expiration.timestamp()))
    content = 'this is a test file'
    r = client.post(
        f'/{pk1}/snap.file',
        data=content,
        headers={'authorisation': key1, 'content-type': 'text/html'},
    )
    assert r.status_code == 200, r.text
    # make sure expiration2 > expiration1
    sleep(1)
    # Second site, uploading the same content under a different path/type.
    r = client.post('/create/', headers={'authorisation': 'YWJjZAy'})
    assert r.status_code == 200, r.text
    obj = r.json()
    pk2 = re.sub(r'^https://example\.com/', '', obj['url']).strip('/')
    key2 = obj['secret_key']
    site_expiration = datetime.fromisoformat(obj['site_expiration'][:-1]).replace(tzinfo=timezone.utc)
    expiration2 = int(round(site_expiration.timestamp()))
    assert expiration2 > expiration1
    r = client.post(
        f'/{pk2}/different.file',
        data=content,
        headers={'authorisation': key2, 'content-type': 'foo/bar'},
    )
    assert r.status_code == 200, r.text
    # Both sites serve the content with their own content types.
    r = client.get(f'/{pk1}/snap.file')
    assert r.status_code == 200, r.text
    assert r.text == content
    assert r.headers['content-type'] == 'text/html'
    r = client.get(f'/{pk2}/different.file')
    assert r.status_code == 200, r.text
    assert r.text == content
    assert r.headers['content-type'] == 'foo/bar'
    # Per-site KV entries keep their own expirations and content types.
    r = client.get('/testing/storage/', params={'prefix': f'site:{pk1}'})
    assert r.status_code == 200, r.text
    assert r.json() == {
        f'site:{pk1}:/.smokeshow.json': {
            'value': RegexStr(fr'\{{\n  "url": "https://example.com/{pk1}/",\n.*'),
            'metadata': {'content_type': 'application/json', 'size': AnyInt()},
            'expiration': expiration1,
        },
        f'site:{pk1}:/snap.file': {
            'value': '1',
            'metadata': {
                'size': 19,
                'content_type': 'text/html',
                'hash': 'WIFwflSwES+QG8g6H/usrI+rdOpGpvcGo+/F99TBxiU=',
            },
            'expiration': expiration1,
        },
    }
    r = client.get('/testing/storage/', params={'prefix': f'site:{pk2}'})
    assert r.status_code == 200, r.text
    assert r.json() == {
        f'site:{pk2}:/.smokeshow.json': {
            'value': RegexStr(fr'\{{\n  "url": "https://example.com/{pk2}/",\n.*'),
            'metadata': {'content_type': 'application/json', 'size': AnyInt()},
            'expiration': expiration2,
        },
        f'site:{pk2}:/different.file': {
            'value': '1',
            'metadata': {'size': 19, 'content_type': 'foo/bar', 'hash': 'WIFwflSwES+QG8g6H/usrI+rdOpGpvcGo+/F99TBxiU='},
            'expiration': expiration2,
        },
    }
    # The shared file entry was overwritten by the second upload: it carries
    # the second site's metadata and the later expiration.
    r = client.get('/testing/storage/', params={'prefix': 'file:WIFwflSwES'})
    assert r.status_code == 200, r.text
    assert r.json() == {
        'file:WIFwflSwES+QG8g6H/usrI+rdOpGpvcGo+/F99TBxiU=': {
            'value': 'this is a test file',
            'metadata': {'path': '/different.file', 'public_key': pk2},
            'expiration': expiration2,
        }
    }
def test_site_not_found(client: TestClient):
    """Requesting an unknown site id must yield a plain-text 404 page."""
    response = client.get('/0123456789abcdefghij/')
    assert response.status_code == 404, response.text
    assert response.text == '404: Site "0123456789abcdefghij" not found'
    assert response.headers['content-type'] == 'text/plain;charset=UTF-8'
|
import unittest
from cava.utils.core import Variant
from cava.utils.csn import makeProteinString
class TestmakeProteinString(unittest.TestCase):
    """Unit tests for cava.utils.csn.makeProteinString.

    Each test passes a reference protein, a mutated protein and a coordinate to
    makeProteinString and checks the returned tuple:
    (HGVS-style protein notation, (position, reference AA(s), altered AA(s))).
    Proteins are given in single-letter code with 'X' as the stop codon.
    NOTE(review): per the comment in test_makeProteinString_EarlyStopInMiddle3BP,
    the Variant argument appears to be mostly a placeholder — only the frame may
    matter; confirm against makeProteinString's implementation.
    """

    # --- degenerate input -------------------------------------------------

    def test_makeProteinString_emptyProt(self):
        # An empty reference protein yields an empty notation and '.' fields.
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "", "MLX", 1)
        expected = ('', ('.', '.', '.'))
        print("Testing prot= empty\n")
        self.assertEqual(actual, expected)

    # --- simple substitutions and early stops -----------------------------

    def test_makeProteinString_Syn(self):
        variant = Variant("chr1", 1000, "C", "T")
        # print("Testing Syn Change\n")
        actual = makeProteinString(variant, "MRX", "MLX", 6)
        expected = ('_p.Arg2Leu', ('2', 'R', 'L'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_EarlyStopInMiddleInPhase(self):
        variant = Variant("chr1", 1000, "C", "T")
        print("Early Stop - 3rd pos of 6\n")
        actual = makeProteinString(variant, "MYLRGX", "MYX", 9)
        expected = ('_p.Leu3Ter', ('3', 'L', 'X'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_EarlyStopInMiddleOutofPhase(self):
        # Same early stop as above but with an out-of-phase coordinate.
        variant = Variant("chr1", 1000, "C", "T")
        print("Early Stop - 3rd pos of 6\n")
        actual = makeProteinString(variant, "MYLRGX", "MYX", 8)
        expected = ('_p.Leu3Ter', ('3', 'L', 'X'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_EarlyStopNearEnd(self):
        variant = Variant("chr1", 1000, "C", "T")
        print("Early Stop - 2nd pos of 3\n")
        actual = makeProteinString(variant, "MRX", "MX", 4)
        expected = ('_p.Arg2Ter', ('2', 'R', 'X'))
        self.assertEqual(actual, expected)

    # --- deletions --------------------------------------------------------

    def test_makeProteinString_delMet1(self):
        # Losing the initiator methionine gives the unknown-effect notation 'p.?'.
        print("Testing deletion of Initial Methionine")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRX", "LRX", 1)
        expected = ('_p.?', ('1', 'M', '-'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_del2(self):
        print("Testing deletion 2AA")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRYX", "MYX", 1)
        expected = ('_p.Leu2_Arg3del', ('2-3', 'LR', '-'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_del1(self):
        print("Testing deletion of 1AA")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRX", "MRX", 1)
        expected = ('_p.Leu2del', ('2', 'L', '-'))
        self.assertEqual(actual, expected)

    # --- insertions -------------------------------------------------------

    def test_makeProteinString_checkMetIns4(self):
        print("Testing checking insertion after Met")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRX", "MQYMLRX", 3)
        expected = ('_p.Met1_Leu2insGlnTyrMet', ('1-2', '-', 'QYM'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_checkMetIns1(self):
        print("Testing checking 1 AA insertion after Met")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRX", "MQLRX", 3)
        expected = ('_p.Met1_Leu2insGln', ('1-2', '-', 'Q'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_checkInsTer(self):
        # Insertion containing a stop codon: notation truncates at the Ter.
        print("Testing checking 3AA Insertion that contains a Ter in the middle")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRQX", "MLLXLRQX", 3)
        expected = ('_p.Leu2_Arg3insLeuTer', ('2-3', '-', 'LX'))
        self.assertEqual(actual, expected)

    # --- stop-loss extensions ---------------------------------------------

    def test_makeProteinString_checkShortExt(self):
        print("Testing checking extension")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRX", "MLRQX", 12)
        expected = ('_p.Ter4GlnextX2', ('4', 'X', 'QX'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_checkLongExtInPhase(self):
        print("Testing checking long extension in phase")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRX", "MLRQLVYX", 12)
        expected = ('_p.Ter4GlnextX5', ('4', 'X', 'QLVYX'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_checkLongExtnotPhase(self):
        print("Testing checking long extension not in phase")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRX", "MLRQLVYX", 11)
        expected = ('_p.Ter4GlnextX5', ('4', 'X', 'QLVYX'))
        self.assertEqual(actual, expected)

    # --- frameshifts ------------------------------------------------------

    def test_makeProteinString_checkShortFS(self):
        print("Testing checking frameshift with mutprot<prot")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRYQVRX", "MLSVX", 8)
        expected = ('_p.Arg3SerfsTer3', ('3', 'R', 'SVX'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_checkDellookLikeShortFS(self):
        print("Testing checking Deletion")
        variant = Variant("chr1", 1000, "CTGGCTTCGGTCG", "C")
        actual = makeProteinString(variant, "MLRYQVQX", "MLQX", 7)
        expected = ('_p.Arg3_Val6del', ('3-6', 'RYQV', '-'))
        self.assertEqual(actual, expected)

    # Deletions near 3' end are deemed to include the Stop codon
    def test_makeProteinString_checkDelasFS(self):
        print("Testing checking check frameshift")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRYQVRX", "MLRVX", 6)
        expected = ('_p.Tyr4ValfsTer2', ('4', 'Y', 'VX'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_checkLongFS(self):
        print("Testing checking long frameshift")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRYQVQX", "MLQYLVMSNX", 7)
        expected = ('_p.Arg3GlnfsTer8', ('3', 'R', 'QYLVMSNX'))
        self.assertEqual(actual, expected)

    # --- deletion-insertions ----------------------------------------------

    def test_makeProteinString_delins(self):
        print("Testing checking delins")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRYQVQX", "MLRLVISVQX", 7)
        expected = ('_p.Tyr4_Gln5delinsLeuValIleSer', ('4-5', 'YQ', 'LVIS'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_delinswithTerAsFS(self):
        print("Testing checking delins 1 AA becoming 3 (with Ter in middle) ")
        variant = Variant("chr1", 1000, "C", "T")
        # Q --> LXR
        actual = makeProteinString(variant, "MLRYQVQX", "MLRYLXRVQX", 7)
        expected = ('_p.Gln5delinsLeuTer', ('5', 'Q', 'LX'))
        self.assertEqual(actual, expected)

    # deletion-insertion variants starting N-terminal () of and including the translation termination (stop) codon are described as frame shift.
    def test_makeProteinString_delinsasFS(self):
        print("Testing checking delins affecting Stop Should be an FS ")
        variant = Variant("chr1", 1000, "C", "T")
        # Q --> LXR
        actual = makeProteinString(variant, "MLRYQX", "MLRLRX", 7)
        expected = ('_p.Tyr4LeufsTer3', ('4', 'Y', 'LRX'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_extNoTer(self):
        # Mutant protein never reaches a stop codon: extension of unknown length.
        print("Testing checking delins")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRX", "MLRLVI", 7)
        expected = ('_p.Ter4Leuext*?', ('4', 'X', 'LVI'))
        self.assertEqual(actual, expected)

    # --- duplications and short sequence repeats (SSR) --------------------

    def test_makeProteinString_dup1(self):
        print("Testing checking dup of 1 base")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRX", "MLLRX", 6)
        expected = ('_p.Leu2dup', ('2-3', '-', 'L'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_dup_AA(self):
        print("Testing checking dup of pattern 2 AA long")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLYRX", "MLYLYRX", 6)
        expected = ('_p.Leu2_Tyr3dup', ('3-4', '-', 'LY'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_ssr2_gain1to3(self):
        # %3B is the URL-encoded ';' in the HGVS repeat notation.
        print("Testing checking ssr 1 to 3 copies of 2-long SSR")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLYRX", "MLYLYLYRX", 6)
        expected = ('_p.Leu2_Tyr3[1]%3B[3]', ('3-4', '-', 'LYLY'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_ssr2_gain0to3(self):
        print("Testing checking ssr 0 to 3 copies of 2-long SSR .. should not be an SSR")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MRX", "MLYLYLYRX", 6)
        expected = ('_p.Met1_Arg2insLeuTyrLeuTyrLeuTyr', ('1-2', '-', 'LYLYLY'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_ssr2_loss2to1(self):
        print("Testing checking ssr 2-long deletion")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLYLYRX", "MLYRX", 6)
        expected = ('_p.Leu2_Tyr3[2]%3B[1]', ('4-5', 'LY', '-'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_ssr2_loss2to0(self):
        print("Testing checking ssr 2-long deletion to 0")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLYLYRX", "MRX", 6)
        expected = ('_p.Leu2_Tyr3[2]%3B[0]', ('2-5', 'LYLY', '-'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_ssr1_gain1to3(self):
        print("Testing checking 1 to 3 copies of 1-long SSR")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRX", "MLLLRX", 6)
        expected = ('_p.Leu2[1]%3B[3]', ('2-3', '-', 'LL'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_ssr1_loss2to1(self):
        print("Testing checking ssr 2-long deletion")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLLRX", "MLRX", 6)
        expected = ('_p.Leu2[2]%3B[1]', ('3', 'L', '-'))
        self.assertEqual(actual, expected)

    # --- real-world sized input (BRCA1 missense) --------------------------

    def test_makeProteinString_basic(self):
        # Full-length protein with a single Tyr1852Cys substitution near the end.
        print("Testing checking basic change")
        variant = Variant("chr17", 43045712, "T", "C")
        REF = "MDLSALRVEEVQNVINAMQKILECPICLELIKEPVSTKCDHIFCKFCMLKLLNQKKGPSQCPLCKNDITKRSLQESTRFSQLVEELLKIICAFQLDTGLEYANSYNFAKKENNSPEHLKDEVSIIQSMGYRNRAKRLLQSEPENPSLQETSLSVQLSNLGTVRTLRTKQRIQPQKTSVYIELGSDSSEDTVNKATYCSVGDQELLQITPQGTRDEISLDSAKKAACEFSETDVTNTEHHQPSNNDLNTTEKRAAERHPEKYQGSSVSNLHVEPCGTNTHASSLQHENSSLLLTKDRMNVEKAEFCNKSKQPGLARSQHNRWAGSKETCNDRRTPSTEKKVDLNADPLCERKEWNKQKLPCSENPRDTEDVPWITLNSSIQKVNEWFSRSDELLGSDDSHDGESESNAKVADVLDVLNEVDEYSGSSEKIDLLASDPHEALICKSERVHSKSVESNIEDKIFGKTYRKKASLPNLSHVTENLIIGAFVTEPQIIQERPLTNKLKRKRRPTSGLHPEDFIKKADLAVQKTPEMINQGTNQTEQNGQVMNITNSGHENKTKGDSIQNEKNPNPIESLEKESAFKTKAEPISSSISNMELELNIHNSKAPKKNRLRRKSSTRHIHALELVVSRNLSPPNCTELQIDSCSSSEEIKKKKYNQMPVRHSRNLQLMEGKEPATGAKKSNKPNEQTSKRHDSDTFPELKLTNAPGSFTKCSNTSELKEFVNPSLPREEKEEKLETVKVSNNAEDPKDLMLSGERVLQTERSVESSSISLVPGTDYGTQESISLLEVSTLGKAKTEPNKCVSQCAAFENPKGLIHGCSKDNRNDTEGFKYPLGHEVNHSRETSIEMEESELDAQYLQNTFKVSKRQSFAPFSNPGNAEEECATFSAHSGSLKKQSPKVTFECEQKEENQGKNESNIKPVQTVNITAGFPVVGQKDKPVDNAKCSIKGGSRFCLSSQFRGNETGLITPNKHGLLQNPYRPPLFPIKSFVKTKCKKNLLEENFEEHSMSPEREMGNENIPSTVSTISRNNIRENVFKEASSSNINEVGSSTNEVGSSINEIGSSDENIQAELGRNRGPKLNAMLRLGVLQPEVYKQSLPGSNCKHPEIKKQEYEEVVQTVNTDFSPYLISDNLEQPMGSSHASQVCSETPDDLLDDGEIKEDTSFAENDIKESSAVFSKSVQKGELSRSPSPFTHTHLAQGYRRGAKKLESSEENLSSEDEELPCFQHLLFGKVNNIPSQSTRHSTVATECLSKNTEENLLSLKNSLNDCSNQVILAKASQEHHLSEETKCSASLFSSQCSELEDLTANTNTQDPFLIGSSKQMRHQSESQGVGLSDKELVSDDEERGTGLEENNQEEQSMDSNLGEAASGCESETSVSEDCSGLSSQSDILTTQQRDTMQHNLIKLQQEMAELEAVLEQHGSQPSNSYPSIISDSSALEDLRNPEQSTSEKAVLTSQKSSEYPISQNPEGLSADKFEVSADSSTSKNKEPGVERSSPSKCPSLDDRWYMHSCSGSLQNRNYPSQEELIKVVDVEEQQLEESGPHDLTETSYLPRQDLEGTPYLESGISLFSDDPESDPSEDRAPESARVGNIPSSTSALKVPQLKVAESAQSPAAAHTTDTAGYNAMEESVSREKPELTASTERVNKRMSMVVSGLTPEEFMLVYKFARKHHITLTNLITEETTHVVMKTDAEFVCERTLKYFLGIAGGKWVVSYFWVTQSIKERKMLNEHDFEVRGDVVNGRNHQGPKRARESQDRKIFRGLEICCYGPFTNMPTDQLEWMVQLCGASVVKELSSFTLGTGVHPIVVVQPDAWTEDNGFHAIGQMCEAPVVTREWVLDSVALYQCQELDTYLIPQIPHSHY"
        ALT = "MDLSALRVEEVQNVINAMQKILECPICLELIKEPVSTKCDHIFCKFCMLKLLNQKKGPSQCPLCKNDITKRSLQESTRFSQLVEELLKIICAFQLDTGLEYANSYNFAKKENNSPEHLKDEVSIIQSMGYRNRAKRLLQSEPENPSLQETSLSVQLSNLGTVRTLRTKQRIQPQKTSVYIELGSDSSEDTVNKATYCSVGDQELLQITPQGTRDEISLDSAKKAACEFSETDVTNTEHHQPSNNDLNTTEKRAAERHPEKYQGSSVSNLHVEPCGTNTHASSLQHENSSLLLTKDRMNVEKAEFCNKSKQPGLARSQHNRWAGSKETCNDRRTPSTEKKVDLNADPLCERKEWNKQKLPCSENPRDTEDVPWITLNSSIQKVNEWFSRSDELLGSDDSHDGESESNAKVADVLDVLNEVDEYSGSSEKIDLLASDPHEALICKSERVHSKSVESNIEDKIFGKTYRKKASLPNLSHVTENLIIGAFVTEPQIIQERPLTNKLKRKRRPTSGLHPEDFIKKADLAVQKTPEMINQGTNQTEQNGQVMNITNSGHENKTKGDSIQNEKNPNPIESLEKESAFKTKAEPISSSISNMELELNIHNSKAPKKNRLRRKSSTRHIHALELVVSRNLSPPNCTELQIDSCSSSEEIKKKKYNQMPVRHSRNLQLMEGKEPATGAKKSNKPNEQTSKRHDSDTFPELKLTNAPGSFTKCSNTSELKEFVNPSLPREEKEEKLETVKVSNNAEDPKDLMLSGERVLQTERSVESSSISLVPGTDYGTQESISLLEVSTLGKAKTEPNKCVSQCAAFENPKGLIHGCSKDNRNDTEGFKYPLGHEVNHSRETSIEMEESELDAQYLQNTFKVSKRQSFAPFSNPGNAEEECATFSAHSGSLKKQSPKVTFECEQKEENQGKNESNIKPVQTVNITAGFPVVGQKDKPVDNAKCSIKGGSRFCLSSQFRGNETGLITPNKHGLLQNPYRPPLFPIKSFVKTKCKKNLLEENFEEHSMSPEREMGNENIPSTVSTISRNNIRENVFKEASSSNINEVGSSTNEVGSSINEIGSSDENIQAELGRNRGPKLNAMLRLGVLQPEVYKQSLPGSNCKHPEIKKQEYEEVVQTVNTDFSPYLISDNLEQPMGSSHASQVCSETPDDLLDDGEIKEDTSFAENDIKESSAVFSKSVQKGELSRSPSPFTHTHLAQGYRRGAKKLESSEENLSSEDEELPCFQHLLFGKVNNIPSQSTRHSTVATECLSKNTEENLLSLKNSLNDCSNQVILAKASQEHHLSEETKCSASLFSSQCSELEDLTANTNTQDPFLIGSSKQMRHQSESQGVGLSDKELVSDDEERGTGLEENNQEEQSMDSNLGEAASGCESETSVSEDCSGLSSQSDILTTQQRDTMQHNLIKLQQEMAELEAVLEQHGSQPSNSYPSIISDSSALEDLRNPEQSTSEKAVLTSQKSSEYPISQNPEGLSADKFEVSADSSTSKNKEPGVERSSPSKCPSLDDRWYMHSCSGSLQNRNYPSQEELIKVVDVEEQQLEESGPHDLTETSYLPRQDLEGTPYLESGISLFSDDPESDPSEDRAPESARVGNIPSSTSALKVPQLKVAESAQSPAAAHTTDTAGYNAMEESVSREKPELTASTERVNKRMSMVVSGLTPEEFMLVYKFARKHHITLTNLITEETTHVVMKTDAEFVCERTLKYFLGIAGGKWVVSYFWVTQSIKERKMLNEHDFEVRGDVVNGRNHQGPKRARESQDRKIFRGLEICCYGPFTNMPTDQLEWMVQLCGASVVKELSSFTLGTGVHPIVVVQPDAWTEDNGFHAIGQMCEAPVVTREWVLDSVALYQCQELDTCLIPQIPHSHY"
        actual = makeProteinString(variant, REF, ALT, 5558)
        expected = ('_p.Tyr1852Cys', ('1852', 'Y', 'C'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_EarlyStopInMiddle3BP(self):
        # Note variant is not being used.. just possibly frame
        variant = Variant("chr1", 1000, "C", "TTCA")
        print("Early Stop - 3rd pos of 6, from in phase insertion\n")
        actual = makeProteinString(variant, "MYLRGX", "MYX", 9)
        expected = ('_p.Leu3Ter', ('3', 'L', 'X'))
        self.assertEqual(actual, expected)

    # --- changes involving the initiator methionine -----------------------

    def test_makeProteinString_Met1toAA(self):
        print("Testing mutation of Methionine")
        variant = Variant("chr1", 1000, "C", "T")
        actual = makeProteinString(variant, "MLRX", "LLRX", 1)
        expected = ('_p.?', ('1', 'M', 'L'))
        self.assertEqual(actual, expected)

    def test_makeProteinString_Met1toAAS(self):
        print("Testing mutation of 2 AA (including Methionine)")
        variant = Variant("chr1", 1000, "CTCT", "TCAG")
        actual = makeProteinString(variant, "MLRX", "LRRX", 1)
        expected = ('_p.?', ('1-2', 'ML', 'LR'))
        self.assertEqual(actual, expected)
"""Build and packaging configuration for the meta-ml distribution."""
from setuptools import setup

# The PyPI long description is the project README, verbatim.
with open('README.md') as readme_file:
    long_description = readme_file.read()

# Optional dependency groups, installable as e.g. ``pip install meta-ml[sklearn]``.
extras_require = {
    "sklearn": [
        "scikit-learn>=0.21.2",
    ],
    "tasks": [
        "openml==0.9.0",
        "kaggle",
        "psutil",
    ],
    "experiments": [
        "yamlordereddictloader",
    ]
}
# The "all" extra pulls in every optional dependency, deduplicated and sorted.
all_extras = {requirement for group in extras_require.values() for requirement in group}
extras_require["all"] = sorted(all_extras)

setup(
    name="meta-ml",
    version="0.0.24",
    description="MetaRL-based Estimator using Task-encodings for AutoML",
    long_description=long_description,
    long_description_content_type='text/markdown',
    url="https://github.com/cosmicBboy/ml-research/tree/master/metalearn",
    packages=[
        "metalearn",
        "metalearn.components",
        "metalearn.data_environments",
        "metalearn.data_environments.feature_maps",
        "metalearn.inference",
    ],
    install_requires=[
        "click==7.0",
        "dill==0.3.1.1",
        "numpy",
        "pandas==1.0.5",
        "torch==1.5.0",
        "scipy",
    ],
    extras_require=extras_require,
    scripts=["bin/metalearn"],
)
|
import numpy as np
import PIL.Image as image
import matplotlib.pyplot as plt


def dft2(pixels):
    """Return the 2-D discrete Fourier transform of a 2-D array.

    Mathematically identical to ``np.fft.fft2(pixels)``, written explicitly
    as two DFT matrix products (rows, then columns):
    ``W_rows @ pixels @ W_cols`` with ``W[k, n] = exp(-2j*pi*k*n/N)``.
    Vectorized NumPy replaces the original O(N^2) Python loop (each
    iteration of which performed a full matrix product), and the transform
    now supports non-square inputs.
    """
    rows, cols = pixels.shape
    row_idx = np.arange(rows)
    col_idx = np.arange(cols)
    w_rows = np.exp(-2j * np.pi * np.outer(row_idx, row_idx) / rows)
    w_cols = np.exp(-2j * np.pi * np.outer(col_idx, col_idx) / cols)
    return w_rows @ pixels @ w_cols


F = image.open('test.png')
# Bug fix: for an RGB(A) PNG, np.array(F) has shape (H, W, C), so the old
# ``F.shape[-1]`` picked up the channel count (3 or 4) instead of the image
# width. Convert to a single luminance channel ('L') so the transform always
# operates on a 2-D (H, W) array.
F = np.array(F.convert('L'), dtype=float)
fft = dft2(F)
plt.imshow(np.abs(fft), cmap='gray')
plt.show()
"""
The Snap7 Python library.
"""
import pkg_resources
import snap7.client as client
import snap7.common as common
import snap7.error as error
import snap7.logo as logo
import snap7.server as server
import snap7.types as types
import snap7.util as util
# Public submodules re-exported by ``import snap7``.
__all__ = ['client', 'common', 'error', 'logo', 'server', 'types', 'util']

try:
    # Report the version of the installed python-snap7 distribution.
    __version__ = pkg_resources.require("python-snap7")[0].version
except pkg_resources.DistributionNotFound:
    # Package not installed (e.g. running from a source checkout):
    # fall back to a placeholder release-candidate version.
    __version__ = "0.0rc0"
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.beans
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
class XFastPropertySet(XInterface_8f010a43):
    """
    provides a fast way of accessing and changing property values.

    This interface is an extension to the XPropertySet interface. The get and set methods use handles to access the property values instead of character strings.

    See Also:
        `API XFastPropertySet <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1beans_1_1XFastPropertySet.html>`_
    """
    # UNO namespace/type metadata consumed by the Python-UNO bridge,
    # not intended for direct use by Python callers.
    __ooo_ns__: str = 'com.sun.star.beans'
    __ooo_full_ns__: str = 'com.sun.star.beans.XFastPropertySet'
    __ooo_type_name__: str = 'interface'
    __pyunointerface__: str = 'com.sun.star.beans.XFastPropertySet'

    @abstractmethod
    def getFastPropertyValue(self, nHandle: int) -> object:
        """
        returns the value of the property with the specified handle.

        Raises:
            com.sun.star.beans.UnknownPropertyException: ``UnknownPropertyException``
            com.sun.star.lang.WrappedTargetException: ``WrappedTargetException``
        """

    @abstractmethod
    def setFastPropertyValue(self, nHandle: int, aValue: object) -> None:
        """
        sets the value to the property with the specified name.

        Raises:
            com.sun.star.beans.UnknownPropertyException: ``UnknownPropertyException``
            com.sun.star.beans.PropertyVetoException: ``PropertyVetoException``
            com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
            com.sun.star.lang.WrappedTargetException: ``WrappedTargetException``
        """

__all__ = ['XFastPropertySet']
|
from dynaconf import Dynaconf
# Load configuration from settings.toml; environments=True enables layered
# [default]/[development]/[production]-style sections in that file.
settings = Dynaconf(settings_file="settings.toml", environments=True)

# Print two top-level keys, then two keys nested under the NAMESPACE table.
# (Presumably settings.toml defines A, B, and [NAMESPACE] with A/B — verify
# against that file.)
print(settings.A)
print(settings.B)
print(settings.NAMESPACE.A)
print(settings.NAMESPACE.B)
|
from git_hub_issue_commits import GitHubIssueCommits
import csv
import json
import os
class NoFileExtensionException(Exception):
    """Raised when a file address has a missing or unsupported extension."""

    def __init__(self, file_address):
        message = 'Please provide extension for following file: (' + file_address + ')'
        super(NoFileExtensionException, self).__init__(message)
class IO:
    """Static helpers for reading GitHub issue/commit records from disk.

    NOTE(review): this code appears to target Python 2 (it references the
    ``unicode`` builtin and passes ``encoding=`` to ``json.loads``); it will
    not run unchanged on Python 3.
    """

    def __init__(self):
        pass

    @staticmethod
    def read_issue_commits(file_address, file_extension=None, tuple_keep_filter_attribute=None, keep=True,
                           ignore_field_without_value=True):
        # type: (str, str, tuple, bool, bool) -> IGitHubFixCommit  # NOTE(review): actually a generator of GitHubIssueCommits
        """
        Lazily read issue/commit records from a txt, csv or json file.

        :param: file_address path of the input file
        :param: file_extension can be txt or csv or json; derived from
            file_address when omitted
        :param: tuple_keep_filter_attribute tuple of (field index/name, value)
            used as the keep-or-filter condition applied to each record
        :param: keep if it is true keep records matching the condition,
            otherwise filter them out
        :param: ignore_field_without_value skip records that lack the filter
            field entirely (csv/json branches only)
        :yields: GitHubIssueCommits, one per record
        """
        def invert_condition(condition, invert):
            # NOTE(review): despite its name, this returns `condition`
            # unchanged when `invert` is truthy and negates it otherwise.
            if invert:
                return condition
            else:
                return not condition

        if not file_extension:
            # Derive the extension from the file name; only the derived
            # extension is validated.
            file_extension = file_address.split('.').pop().lower()
            if file_extension not in ['txt', 'csv', 'json']:
                raise NoFileExtensionException(file_address=file_address)

        def utf8_encode(to_encode):
            # Encode strings (or lists of strings) to UTF-8 bytes; other
            # values pass through unchanged.
            # NOTE(review): ``unicode`` only exists on Python 2.
            if type(to_encode) is list:
                return map(lambda t: t.encode('utf8'), to_encode)
            elif type(to_encode) is unicode:
                to_encode = to_encode.encode('utf8')
                return to_encode
            else:
                return to_encode

        with open(file_address) as file_handler:
            if file_extension == 'txt':
                # Ignore first line (first line is header)
                file_handler.readline()
                for line in file_handler:
                    # In the txt format the filter attribute is a
                    # comma-separated column index.
                    split_line = line.split(',')
                    # skip if the filter attribute is not matched(check if the value is not inside the dictionary)
                    if tuple_keep_filter_attribute and \
                            invert_condition((tuple_keep_filter_attribute[1]
                                              not in split_line[int(tuple_keep_filter_attribute[0])]), keep):
                        continue
                    git_hub_issue_commit = GitHubIssueCommits()
                    git_hub_issue_commit.init_from_text(line)
                    yield git_hub_issue_commit
            elif file_extension == 'csv':
                cvs_reader = csv.DictReader(file_handler)
                for row_dictionary in cvs_reader:
                    # ignore field without value of filter attribute
                    if ignore_field_without_value and tuple_keep_filter_attribute \
                            and tuple_keep_filter_attribute[0] not in row_dictionary:
                        continue
                    # skip if the filter attribute is not matched(check if the value is not inside the dictionary)
                    if tuple_keep_filter_attribute and tuple_keep_filter_attribute[0] in row_dictionary and \
                            invert_condition(tuple_keep_filter_attribute[1]
                                             not in row_dictionary[tuple_keep_filter_attribute[0]], keep):
                        continue
                    git_hub_issue_commit = GitHubIssueCommits()
                    git_hub_issue_commit.init_from_dictionary(row_dictionary)
                    yield git_hub_issue_commit
            elif file_extension == 'json':
                # First line is skipped — presumably a header, as in the txt
                # branch (one JSON object per subsequent line).
                file_handler.readline()
                for json_txt in file_handler:
                    # NOTE(review): the ``encoding`` kwarg of json.loads was
                    # removed in Python 3.9.
                    json_line = json.loads(json_txt, encoding='utf8')
                    json_line = {k: utf8_encode(v) for k, v in json_line.items()}
                    # ignore field without value of filter attribute
                    if ignore_field_without_value and tuple_keep_filter_attribute \
                            and tuple_keep_filter_attribute[0] not in json_line:
                        continue
                    # skip if the filter attribute is not matched(check if the value is not inside the dictionary)
                    if tuple_keep_filter_attribute and tuple_keep_filter_attribute[0] in json_line and \
                            invert_condition(tuple_keep_filter_attribute[1]
                                             not in json_line[tuple_keep_filter_attribute[0]], keep):
                        continue
                    git_hub_issue_commit = GitHubIssueCommits()
                    git_hub_issue_commit.init_from_dictionary(json_line)
                    yield git_hub_issue_commit
|
from dataclasses import InitVar, dataclass, asdict, field
from typing import Dict, Any, List, Set
from enum import Enum
class HighValuesAre(str, Enum):
    """Whether high values of a signal are considered bad, good, or neutral."""
    bad = "bad"
    good = "good"
    neutral = "neutral"
class SignalFormat(str, Enum):
    """Unit/format in which a signal's values are expressed."""
    per100k = "per100k"
    percent = "percent"
    fraction = "fraction"
    raw_count = "raw_count"
    raw = "raw"
class SignalCategory(str, Enum):
    """Coarse grouping of signals used for display/organization."""
    public = "public"
    early = "early"
    late = "late"
    other = "other"
def guess_name(source: str, signal: str, is_weighted: bool) -> str:
    """Derive a human-readable display name from a signal identifier."""
    cleaned = signal
    if source == "fb-survey" and is_weighted:
        # Expand the "w" marker so "weighted" shows up as its own word.
        cleaned = cleaned.replace("smoothed_w", "smoothed_weighted_")
        cleaned = cleaned.replace("raw_w", "raw_weighted_")
    name = " ".join(part.capitalize() for part in cleaned.split("_"))
    # Upper-case the well-known acronyms and fix the "Dont" contraction.
    for old, new in ((" Ili", " ILI"), (" Cli", " CLI"), ("Dont", "Do Not")):
        name = name.replace(old, new)
    return name
def guess_high_values_are(source: str, signal: str) -> HighValuesAre:
    """Guess whether high values of this signal are good, bad, or neutral.

    Checks are ordered: earlier, more specific rules win (e.g. "vaccine_likely"
    must be classified neutral before the generic "vaccine" -> good rule).
    """
    if signal.endswith(("_ili", "_wili", "_cli", "_wcli")):
        return HighValuesAre.bad
    if source == "chng" and signal.endswith("_covid"):
        return HighValuesAre.bad
    if source == "covid-act-now":
        if signal.endswith("_positivity_rate"):
            return HighValuesAre.bad
        if signal.endswith("_total_tests"):
            return HighValuesAre.good
    if source == "fb-survey":
        if "tested_positive" in signal:
            return HighValuesAre.bad
        if any(term in signal for term in ("anxious", "depressed", "felt_isolated", "worried")):
            return HighValuesAre.bad
        if any(term in signal for term in ("hesitancy_reason", "vaccine_likely", "dontneed_reason")):
            return HighValuesAre.neutral
        if any(term in signal for term in ("mask", "vaccine", "vaccinated")):
            return HighValuesAre.good
    if source in ["quidel", "indicator-combination", "google-symptoms", "doctor-visits", "hospital-admissions", "usa-facts", "jhu-csse", "hhs"]:
        return HighValuesAre.bad
    return HighValuesAre.neutral
def guess_format(source: str, signal: str) -> SignalFormat:
    """Guess the unit/format in which a signal's values are expressed."""
    if source in ("fb-survey", "quidel", "hospital-admissions"):
        return SignalFormat.percent
    if source == "safegraph" and signal.endswith(("_prop", "_prop_7dav")):
        return SignalFormat.per100k
    count_like_sources = ("indicator-combination", "usa-facts", "jhu-csse")
    if source in count_like_sources:
        if signal.endswith("_prop"):
            return SignalFormat.per100k
        if signal.endswith("_num"):
            return SignalFormat.raw_count
    if source == "covid-act-now":
        if signal == "pcr_specimen_positivity_rate":
            return SignalFormat.fraction
        if signal == "pcr_specimen_total_tests":
            return SignalFormat.raw_count
    return SignalFormat.raw
def guess_category(source: str, signal: str) -> SignalCategory:
    """Bucket a signal into early/public/late indicator categories."""
    if source == "doctor-visits":
        return SignalCategory.early
    if source == "fb-survey" and signal.endswith(("_ili", "_cli")):
        return SignalCategory.early
    if source in ("fb-survey", "safegraph", "google-symptoms"):
        return SignalCategory.public
    if source in ("quidel", "hospital-admissions", "indicator-combination", "usa-facts", "jhu-csse", "hhs", "chng"):
        return SignalCategory.late
    return SignalCategory.other
def guess_is_smoothed(signal: str) -> bool:
    """True when the signal name carries a smoothed/7-day-average marker."""
    return any(marker in signal for marker in ("smoothed_", "7dav"))
def guess_is_cumulative(signal: str) -> bool:
    """True when the signal name marks a running cumulative count."""
    return signal.find("cumulative_") >= 0
def guess_is_weighted(source: str, signal: str) -> bool:
    """True when the signal is a weighted (survey- or claims-adjusted) variant."""
    if source == "fb-survey":
        if signal.startswith("raw_w"):
            return True
        if signal.startswith("smoothed_w"):
            tail = signal[len("smoothed_"):]
            # smoothed_wanted/wearing/work/worried are unweighted signals whose
            # names merely start with "w"; their weighted twins are smoothed_w<word>.
            return not tail.startswith(("wanted", "wearing", "work", "worried"))
        return False
    return source == "chng" and signal.startswith("smoothed_adj_")
def guess_has_stderr(source: str) -> bool:
    """Only the survey/testing sources report a standard-error column."""
    return source in {"fb-survey", "quidel"}
def guess_has_sample_size(source: str) -> bool:
    """Only the survey/testing sources report a sample-size column."""
    return source in {"fb-survey", "quidel"}
@dataclass
class CovidcastMetaStats:
    """Summary statistics of a signal's values for one geo type."""
    # Field names mirror the meta schema; they intentionally shadow the
    # builtins min/max within this class's namespace only.
    min: float
    mean: float
    stdev: float
    max: float


# Maps a data source name to the set of signal names it publishes.
AllSignalsMap = Dict[str, Set[str]]
def guess_related_fb_survey_like(entry: "CovidcastMetaEntry", weighted_infix: str = "w") -> Set[str]:
    """Guess sibling signals: the raw/smoothed x weighted/unweighted variants.

    Normalizes the entry to its plain smoothed form, then enumerates all four
    combinations from that base.
    """
    base = entry.signal
    if entry.is_weighted:
        # strip the weighting marker to recover the smoothed unweighted name
        base = base.replace("smoothed_" + weighted_infix, "smoothed_").replace("raw_" + weighted_infix, "smoothed_")
    elif not entry.is_smoothed:
        base = base.replace("raw_", "smoothed_")
    return {
        base,
        base.replace("smoothed_", "smoothed_" + weighted_infix),
        base.replace("smoothed_", "raw_"),
        base.replace("smoothed_", "raw_" + weighted_infix),
    }
def guess_related_cases_death_like(entry: "CovidcastMetaEntry") -> Set[str]:
    """Guess related signals for cases/deaths-style sources.

    Enumerates daily/cumulative x raw/smoothed variants for both the raw-count
    ("num") and per-100k ("prop") formats.
    """
    if entry.is_weighted:
        return set()  # cannot handle
    base_prefix = entry.signal[: entry.signal.index("_")]
    related: Set[str] = set()
    for suffix in ("num", "prop"):  # raw counts and per-100k rates
        related.add(f"{base_prefix}_incidence_{suffix}")
        related.add(f"{base_prefix}_cumulative_{suffix}")
        related.add(f"{base_prefix}_7dav_incidence_{suffix}")
        related.add(f"{base_prefix}_7dav_cumulative_{suffix}")
    return related
def guess_related_safegraph(entry: "CovidcastMetaEntry") -> Set[str]:
    """Guess related safegraph mobility signals (num/prop x daily/7dav)."""
    if entry.is_weighted:
        return set()  # cannot handle
    if entry.signal.startswith("median_home_dwell_time"):
        # dwell time has no num/prop variants, only a smoothed twin
        return {"median_home_dwell_time", "median_home_dwell_time_7dav"}
    base = entry.signal.replace("_7dav", "").replace("_prop", "").replace("_num", "")
    related: Set[str] = set()
    for suffix in ("num", "prop"):  # raw counts and per-100k rates
        related.add(f"{base}_{suffix}")
        related.add(f"{base}_{suffix}_7dav")
    return related
def guess_related_generic(entry: "CovidcastMetaEntry") -> Set[str]:
    """Fallback guess: just the raw<->smoothed twin of this signal."""
    if entry.is_weighted or entry.is_cumulative:
        return set()  # don't know
    if entry.is_smoothed:
        return {entry.signal.replace("smoothed_", "raw_")}
    return {entry.signal.replace("raw_", "smoothed_")}
def guess_related_signals(entry: "CovidcastMetaEntry", all_signals: AllSignalsMap) -> List[str]:
    """Guess sibling signals of `entry`, keeping only ones that actually exist."""
    if entry.source == "indicator-combination" and entry.signal.startswith("nmf_"):
        # the nmf_* combination signals have no meaningful siblings
        return []
    source = entry.source
    if source == "fb-survey":
        guesses = guess_related_fb_survey_like(entry, "w")
    elif source in ("chng", "doctor-visits", "hospital-admissions"):
        guesses = guess_related_fb_survey_like(entry, "adj_")
    elif source == "safegraph":
        guesses = guess_related_safegraph(entry)
    elif source in ("indicator-combination", "usa-facts", "jhu-csse"):
        guesses = guess_related_cases_death_like(entry)
    else:
        guesses = guess_related_generic(entry)
    # a signal is not related to itself
    guesses.discard(entry.signal)
    # keep only guesses that are real signals of this source
    return sorted(guesses & all_signals.get(entry.source, set()))
@dataclass
class CovidcastMetaEntry:
    """One (source, signal) covidcast metadata entry plus derived display fields.

    Constructor arguments are the raw DB fields plus `all_signals`; everything
    marked ``field(init=False)`` is derived in ``__post_init__`` from
    source/signal heuristics.
    """
    source: str
    signal: str
    min_time: int   # earliest time value seen (widened by intergrate())
    max_time: int   # latest time value seen
    max_issue: int  # most recent issue seen
    geo_types: Dict[str, CovidcastMetaStats]  # per-geo-type value statistics
    # Derived fields — computed in __post_init__, not constructor arguments.
    name: str = field(init=False)
    high_values_are: HighValuesAre = field(init=False)
    format: SignalFormat = field(init=False)
    category: SignalCategory = field(init=False)
    is_smoothed: bool = field(init=False)
    is_weighted: bool = field(init=False)
    is_cumulative: bool = field(init=False)
    has_stderr: bool = field(init=False)
    has_sample_size: bool = field(init=False)
    related_signals: List[str] = field(init=False)
    # Pseudo-field: passed to __init__/__post_init__ only, never stored.
    all_signals: InitVar[AllSignalsMap]

    def __post_init__(self, all_signals: AllSignalsMap):
        # derive fields
        self.high_values_are = guess_high_values_are(self.source, self.signal)
        self.format = guess_format(self.source, self.signal)
        self.category = guess_category(self.source, self.signal)
        self.is_smoothed = guess_is_smoothed(self.signal)
        self.is_weighted = guess_is_weighted(self.source, self.signal)
        self.is_cumulative = guess_is_cumulative(self.signal)
        self.has_stderr = guess_has_stderr(self.source)
        self.has_sample_size = guess_has_sample_size(self.source)
        self.related_signals = guess_related_signals(self, all_signals)
        # name depends on is_weighted, so it is computed after it
        self.name = guess_name(self.source, self.signal, self.is_weighted)

    def intergrate(self, row: Dict[str, Any]):
        """Merge one more DB row (a single geo type) into this entry.

        Widens the time/issue ranges and records the row's value statistics
        under its geo type.  NOTE(review): the method name is misspelled
        ("integrate") but kept as-is — callers use this spelling.
        """
        if row["min_time"] < self.min_time:
            self.min_time = row["min_time"]
        if row["max_time"] > self.max_time:
            self.max_time = row["max_time"]
        if row["max_issue"] > self.max_issue:
            self.max_issue = row["max_issue"]
        self.geo_types[row["geo_type"]] = CovidcastMetaStats(row["min_value"], row["mean_value"], row["stdev_value"], row["max_value"])

    def asdict(self):
        """Serialize to a plain dict, with nested stats converted too."""
        # Inside this method, `asdict` resolves to the module-level
        # dataclasses.asdict import, not to this method.
        r = asdict(self)
        r["geo_types"] = {k: asdict(v) for k, v in self.geo_types.items()}
        return r
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SyntheticsTestArgs', 'SyntheticsTest']
@pulumi.input_type
class SyntheticsTestArgs:
    """Typed argument bundle accepted when constructing a ``SyntheticsTest``
    resource.

    NOTE(review): generated by the Pulumi Terraform Bridge (tfgen) — see the
    file header; keep manual edits minimal so they are not lost on
    regeneration.
    """
    def __init__(__self__, *,
                 locations: pulumi.Input[Sequence[pulumi.Input[str]]],
                 name: pulumi.Input[str],
                 status: pulumi.Input[str],
                 type: pulumi.Input[str],
                 api_steps: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestApiStepArgs']]]] = None,
                 assertions: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestAssertionArgs']]]] = None,
                 browser_steps: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserStepArgs']]]] = None,
                 browser_variables: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserVariableArgs']]]] = None,
                 config_variables: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestConfigVariableArgs']]]] = None,
                 device_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 message: Optional[pulumi.Input[str]] = None,
                 options_list: Optional[pulumi.Input['SyntheticsTestOptionsListArgs']] = None,
                 request_basicauth: Optional[pulumi.Input['SyntheticsTestRequestBasicauthArgs']] = None,
                 request_client_certificate: Optional[pulumi.Input['SyntheticsTestRequestClientCertificateArgs']] = None,
                 request_definition: Optional[pulumi.Input['SyntheticsTestRequestDefinitionArgs']] = None,
                 request_headers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 request_query: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 set_cookie: Optional[pulumi.Input[str]] = None,
                 subtype: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a SyntheticsTest resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] locations: Array of locations used to run the test. Refer to [Datadog documentation](https://docs.datadoghq.com/synthetics/api_test/#request) for available locations (e.g. `aws:eu-central-1`).
        :param pulumi.Input[str] name: Name of Datadog synthetics test.
        :param pulumi.Input[str] status: Define whether you want to start (`live`) or pause (`paused`) a Synthetic test. Valid values are `live`, `paused`.
        :param pulumi.Input[str] type: Synthetics test type. Valid values are `api`, `browser`.
        :param pulumi.Input[Sequence[pulumi.Input['SyntheticsTestApiStepArgs']]] api_steps: Steps for multistep api tests
        :param pulumi.Input[Sequence[pulumi.Input['SyntheticsTestAssertionArgs']]] assertions: Assertions used for the test. Multiple `assertion` blocks are allowed with the structure below.
        :param pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserStepArgs']]] browser_steps: Steps for browser tests.
        :param pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserVariableArgs']]] browser_variables: Variables used for a browser test steps. Multiple `variable` blocks are allowed with the structure below.
        :param pulumi.Input[Sequence[pulumi.Input['SyntheticsTestConfigVariableArgs']]] config_variables: Variables used for the test configuration. Multiple `config_variable` blocks are allowed with the structure below.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] device_ids: Required if `type = "browser"`. Array with the different device IDs used to run the test. Valid values are `laptop_large`, `tablet`, `mobile_small`, `chrome.laptop_large`, `chrome.tablet`, `chrome.mobile_small`, `firefox.laptop_large`, `firefox.tablet`, `firefox.mobile_small`, `edge.laptop_large`, `edge.tablet`, `edge.mobile_small`.
        :param pulumi.Input[str] message: A message to include with notifications for this synthetics test. Email notifications can be sent to specific users by using the same `@username` notation as events.
        :param pulumi.Input['SyntheticsTestRequestBasicauthArgs'] request_basicauth: The HTTP basic authentication credentials. Exactly one nested block is allowed with the structure below.
        :param pulumi.Input['SyntheticsTestRequestClientCertificateArgs'] request_client_certificate: Client certificate to use when performing the test request. Exactly one nested block is allowed with the structure below.
        :param pulumi.Input['SyntheticsTestRequestDefinitionArgs'] request_definition: Required if `type = "api"`. The synthetics test request.
        :param pulumi.Input[Mapping[str, Any]] request_headers: Header name and value map.
        :param pulumi.Input[Mapping[str, Any]] request_query: Query arguments name and value map.
        :param pulumi.Input[str] set_cookie: Cookies to be used for a browser test request, using the [Set-Cookie](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie) syntax.
        :param pulumi.Input[str] subtype: The subtype of the Synthetic API test. Defaults to `http`. Valid values are `http`, `ssl`, `tcp`, `dns`, `multi`, `icmp`, `udp`, `websocket`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list of tags to associate with your synthetics test. This can help you categorize and filter tests in the manage synthetics page of the UI. Default is an empty list (`[]`).
        """
        pulumi.set(__self__, "locations", locations)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "status", status)
        pulumi.set(__self__, "type", type)
        if api_steps is not None:
            pulumi.set(__self__, "api_steps", api_steps)
        if assertions is not None:
            pulumi.set(__self__, "assertions", assertions)
        if browser_steps is not None:
            pulumi.set(__self__, "browser_steps", browser_steps)
        if browser_variables is not None:
            pulumi.set(__self__, "browser_variables", browser_variables)
        if config_variables is not None:
            pulumi.set(__self__, "config_variables", config_variables)
        if device_ids is not None:
            pulumi.set(__self__, "device_ids", device_ids)
        if message is not None:
            pulumi.set(__self__, "message", message)
        if options_list is not None:
            pulumi.set(__self__, "options_list", options_list)
        if request_basicauth is not None:
            pulumi.set(__self__, "request_basicauth", request_basicauth)
        if request_client_certificate is not None:
            pulumi.set(__self__, "request_client_certificate", request_client_certificate)
        if request_definition is not None:
            pulumi.set(__self__, "request_definition", request_definition)
        if request_headers is not None:
            pulumi.set(__self__, "request_headers", request_headers)
        if request_query is not None:
            pulumi.set(__self__, "request_query", request_query)
        if set_cookie is not None:
            pulumi.set(__self__, "set_cookie", set_cookie)
        if subtype is not None:
            pulumi.set(__self__, "subtype", subtype)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter
    def locations(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        Array of locations used to run the test. Refer to [Datadog documentation](https://docs.datadoghq.com/synthetics/api_test/#request) for available locations (e.g. `aws:eu-central-1`).
        """
        return pulumi.get(self, "locations")
    @locations.setter
    def locations(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "locations", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Name of Datadog synthetics test.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def status(self) -> pulumi.Input[str]:
        """
        Define whether you want to start (`live`) or pause (`paused`) a Synthetic test. Valid values are `live`, `paused`.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: pulumi.Input[str]):
        pulumi.set(self, "status", value)
    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        Synthetics test type. Valid values are `api`, `browser`.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter(name="apiSteps")
    def api_steps(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestApiStepArgs']]]]:
        """
        Steps for multistep api tests
        """
        return pulumi.get(self, "api_steps")
    @api_steps.setter
    def api_steps(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestApiStepArgs']]]]):
        pulumi.set(self, "api_steps", value)
    @property
    @pulumi.getter
    def assertions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestAssertionArgs']]]]:
        """
        Assertions used for the test. Multiple `assertion` blocks are allowed with the structure below.
        """
        return pulumi.get(self, "assertions")
    @assertions.setter
    def assertions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestAssertionArgs']]]]):
        pulumi.set(self, "assertions", value)
    @property
    @pulumi.getter(name="browserSteps")
    def browser_steps(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserStepArgs']]]]:
        """
        Steps for browser tests.
        """
        return pulumi.get(self, "browser_steps")
    @browser_steps.setter
    def browser_steps(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserStepArgs']]]]):
        pulumi.set(self, "browser_steps", value)
    @property
    @pulumi.getter(name="browserVariables")
    def browser_variables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserVariableArgs']]]]:
        """
        Variables used for a browser test steps. Multiple `variable` blocks are allowed with the structure below.
        """
        return pulumi.get(self, "browser_variables")
    @browser_variables.setter
    def browser_variables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserVariableArgs']]]]):
        pulumi.set(self, "browser_variables", value)
    @property
    @pulumi.getter(name="configVariables")
    def config_variables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestConfigVariableArgs']]]]:
        """
        Variables used for the test configuration. Multiple `config_variable` blocks are allowed with the structure below.
        """
        return pulumi.get(self, "config_variables")
    @config_variables.setter
    def config_variables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestConfigVariableArgs']]]]):
        pulumi.set(self, "config_variables", value)
    @property
    @pulumi.getter(name="deviceIds")
    def device_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Required if `type = "browser"`. Array with the different device IDs used to run the test. Valid values are `laptop_large`, `tablet`, `mobile_small`, `chrome.laptop_large`, `chrome.tablet`, `chrome.mobile_small`, `firefox.laptop_large`, `firefox.tablet`, `firefox.mobile_small`, `edge.laptop_large`, `edge.tablet`, `edge.mobile_small`.
        """
        return pulumi.get(self, "device_ids")
    @device_ids.setter
    def device_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "device_ids", value)
    @property
    @pulumi.getter
    def message(self) -> Optional[pulumi.Input[str]]:
        """
        A message to include with notifications for this synthetics test. Email notifications can be sent to specific users by using the same `@username` notation as events.
        """
        return pulumi.get(self, "message")
    @message.setter
    def message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "message", value)
    # NOTE(review): the upstream provider schema supplies no description for
    # options_list, hence no docstring on this getter.
    @property
    @pulumi.getter(name="optionsList")
    def options_list(self) -> Optional[pulumi.Input['SyntheticsTestOptionsListArgs']]:
        return pulumi.get(self, "options_list")
    @options_list.setter
    def options_list(self, value: Optional[pulumi.Input['SyntheticsTestOptionsListArgs']]):
        pulumi.set(self, "options_list", value)
    @property
    @pulumi.getter(name="requestBasicauth")
    def request_basicauth(self) -> Optional[pulumi.Input['SyntheticsTestRequestBasicauthArgs']]:
        """
        The HTTP basic authentication credentials. Exactly one nested block is allowed with the structure below.
        """
        return pulumi.get(self, "request_basicauth")
    @request_basicauth.setter
    def request_basicauth(self, value: Optional[pulumi.Input['SyntheticsTestRequestBasicauthArgs']]):
        pulumi.set(self, "request_basicauth", value)
    @property
    @pulumi.getter(name="requestClientCertificate")
    def request_client_certificate(self) -> Optional[pulumi.Input['SyntheticsTestRequestClientCertificateArgs']]:
        """
        Client certificate to use when performing the test request. Exactly one nested block is allowed with the structure below.
        """
        return pulumi.get(self, "request_client_certificate")
    @request_client_certificate.setter
    def request_client_certificate(self, value: Optional[pulumi.Input['SyntheticsTestRequestClientCertificateArgs']]):
        pulumi.set(self, "request_client_certificate", value)
    @property
    @pulumi.getter(name="requestDefinition")
    def request_definition(self) -> Optional[pulumi.Input['SyntheticsTestRequestDefinitionArgs']]:
        """
        Required if `type = "api"`. The synthetics test request.
        """
        return pulumi.get(self, "request_definition")
    @request_definition.setter
    def request_definition(self, value: Optional[pulumi.Input['SyntheticsTestRequestDefinitionArgs']]):
        pulumi.set(self, "request_definition", value)
    @property
    @pulumi.getter(name="requestHeaders")
    def request_headers(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Header name and value map.
        """
        return pulumi.get(self, "request_headers")
    @request_headers.setter
    def request_headers(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "request_headers", value)
    @property
    @pulumi.getter(name="requestQuery")
    def request_query(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Query arguments name and value map.
        """
        return pulumi.get(self, "request_query")
    @request_query.setter
    def request_query(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "request_query", value)
    @property
    @pulumi.getter(name="setCookie")
    def set_cookie(self) -> Optional[pulumi.Input[str]]:
        """
        Cookies to be used for a browser test request, using the [Set-Cookie](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie) syntax.
        """
        return pulumi.get(self, "set_cookie")
    @set_cookie.setter
    def set_cookie(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "set_cookie", value)
    @property
    @pulumi.getter
    def subtype(self) -> Optional[pulumi.Input[str]]:
        """
        The subtype of the Synthetic API test. Defaults to `http`. Valid values are `http`, `ssl`, `tcp`, `dns`, `multi`, `icmp`, `udp`, `websocket`.
        """
        return pulumi.get(self, "subtype")
    @subtype.setter
    def subtype(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subtype", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of tags to associate with your synthetics test. This can help you categorize and filter tests in the manage synthetics page of the UI. Default is an empty list (`[]`).
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _SyntheticsTestState:
    """State bundle used for looking up and filtering existing
    ``SyntheticsTest`` resources; every field is optional, unlike
    ``SyntheticsTestArgs``, and it additionally exposes the provider-computed
    ``monitor_id``.

    NOTE(review): generated by the Pulumi Terraform Bridge (tfgen) — see the
    file header; keep manual edits minimal so they are not lost on
    regeneration.
    """
    def __init__(__self__, *,
                 api_steps: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestApiStepArgs']]]] = None,
                 assertions: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestAssertionArgs']]]] = None,
                 browser_steps: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserStepArgs']]]] = None,
                 browser_variables: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserVariableArgs']]]] = None,
                 config_variables: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestConfigVariableArgs']]]] = None,
                 device_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 message: Optional[pulumi.Input[str]] = None,
                 monitor_id: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 options_list: Optional[pulumi.Input['SyntheticsTestOptionsListArgs']] = None,
                 request_basicauth: Optional[pulumi.Input['SyntheticsTestRequestBasicauthArgs']] = None,
                 request_client_certificate: Optional[pulumi.Input['SyntheticsTestRequestClientCertificateArgs']] = None,
                 request_definition: Optional[pulumi.Input['SyntheticsTestRequestDefinitionArgs']] = None,
                 request_headers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 request_query: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 set_cookie: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 subtype: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering SyntheticsTest resources.
        :param pulumi.Input[Sequence[pulumi.Input['SyntheticsTestApiStepArgs']]] api_steps: Steps for multistep api tests
        :param pulumi.Input[Sequence[pulumi.Input['SyntheticsTestAssertionArgs']]] assertions: Assertions used for the test. Multiple `assertion` blocks are allowed with the structure below.
        :param pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserStepArgs']]] browser_steps: Steps for browser tests.
        :param pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserVariableArgs']]] browser_variables: Variables used for a browser test steps. Multiple `variable` blocks are allowed with the structure below.
        :param pulumi.Input[Sequence[pulumi.Input['SyntheticsTestConfigVariableArgs']]] config_variables: Variables used for the test configuration. Multiple `config_variable` blocks are allowed with the structure below.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] device_ids: Required if `type = "browser"`. Array with the different device IDs used to run the test. Valid values are `laptop_large`, `tablet`, `mobile_small`, `chrome.laptop_large`, `chrome.tablet`, `chrome.mobile_small`, `firefox.laptop_large`, `firefox.tablet`, `firefox.mobile_small`, `edge.laptop_large`, `edge.tablet`, `edge.mobile_small`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] locations: Array of locations used to run the test. Refer to [Datadog documentation](https://docs.datadoghq.com/synthetics/api_test/#request) for available locations (e.g. `aws:eu-central-1`).
        :param pulumi.Input[str] message: A message to include with notifications for this synthetics test. Email notifications can be sent to specific users by using the same `@username` notation as events.
        :param pulumi.Input[int] monitor_id: ID of the monitor associated with the Datadog synthetics test.
        :param pulumi.Input[str] name: Name of Datadog synthetics test.
        :param pulumi.Input['SyntheticsTestRequestBasicauthArgs'] request_basicauth: The HTTP basic authentication credentials. Exactly one nested block is allowed with the structure below.
        :param pulumi.Input['SyntheticsTestRequestClientCertificateArgs'] request_client_certificate: Client certificate to use when performing the test request. Exactly one nested block is allowed with the structure below.
        :param pulumi.Input['SyntheticsTestRequestDefinitionArgs'] request_definition: Required if `type = "api"`. The synthetics test request.
        :param pulumi.Input[Mapping[str, Any]] request_headers: Header name and value map.
        :param pulumi.Input[Mapping[str, Any]] request_query: Query arguments name and value map.
        :param pulumi.Input[str] set_cookie: Cookies to be used for a browser test request, using the [Set-Cookie](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie) syntax.
        :param pulumi.Input[str] status: Define whether you want to start (`live`) or pause (`paused`) a Synthetic test. Valid values are `live`, `paused`.
        :param pulumi.Input[str] subtype: The subtype of the Synthetic API test. Defaults to `http`. Valid values are `http`, `ssl`, `tcp`, `dns`, `multi`, `icmp`, `udp`, `websocket`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list of tags to associate with your synthetics test. This can help you categorize and filter tests in the manage synthetics page of the UI. Default is an empty list (`[]`).
        :param pulumi.Input[str] type: Synthetics test type. Valid values are `api`, `browser`.
        """
        if api_steps is not None:
            pulumi.set(__self__, "api_steps", api_steps)
        if assertions is not None:
            pulumi.set(__self__, "assertions", assertions)
        if browser_steps is not None:
            pulumi.set(__self__, "browser_steps", browser_steps)
        if browser_variables is not None:
            pulumi.set(__self__, "browser_variables", browser_variables)
        if config_variables is not None:
            pulumi.set(__self__, "config_variables", config_variables)
        if device_ids is not None:
            pulumi.set(__self__, "device_ids", device_ids)
        if locations is not None:
            pulumi.set(__self__, "locations", locations)
        if message is not None:
            pulumi.set(__self__, "message", message)
        if monitor_id is not None:
            pulumi.set(__self__, "monitor_id", monitor_id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if options_list is not None:
            pulumi.set(__self__, "options_list", options_list)
        if request_basicauth is not None:
            pulumi.set(__self__, "request_basicauth", request_basicauth)
        if request_client_certificate is not None:
            pulumi.set(__self__, "request_client_certificate", request_client_certificate)
        if request_definition is not None:
            pulumi.set(__self__, "request_definition", request_definition)
        if request_headers is not None:
            pulumi.set(__self__, "request_headers", request_headers)
        if request_query is not None:
            pulumi.set(__self__, "request_query", request_query)
        if set_cookie is not None:
            pulumi.set(__self__, "set_cookie", set_cookie)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if subtype is not None:
            pulumi.set(__self__, "subtype", subtype)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="apiSteps")
    def api_steps(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestApiStepArgs']]]]:
        """
        Steps for multistep api tests
        """
        return pulumi.get(self, "api_steps")
    @api_steps.setter
    def api_steps(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestApiStepArgs']]]]):
        pulumi.set(self, "api_steps", value)
    @property
    @pulumi.getter
    def assertions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestAssertionArgs']]]]:
        """
        Assertions used for the test. Multiple `assertion` blocks are allowed with the structure below.
        """
        return pulumi.get(self, "assertions")
    @assertions.setter
    def assertions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestAssertionArgs']]]]):
        pulumi.set(self, "assertions", value)
    @property
    @pulumi.getter(name="browserSteps")
    def browser_steps(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserStepArgs']]]]:
        """
        Steps for browser tests.
        """
        return pulumi.get(self, "browser_steps")
    @browser_steps.setter
    def browser_steps(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserStepArgs']]]]):
        pulumi.set(self, "browser_steps", value)
    @property
    @pulumi.getter(name="browserVariables")
    def browser_variables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserVariableArgs']]]]:
        """
        Variables used for a browser test steps. Multiple `variable` blocks are allowed with the structure below.
        """
        return pulumi.get(self, "browser_variables")
    @browser_variables.setter
    def browser_variables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestBrowserVariableArgs']]]]):
        pulumi.set(self, "browser_variables", value)
    @property
    @pulumi.getter(name="configVariables")
    def config_variables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestConfigVariableArgs']]]]:
        """
        Variables used for the test configuration. Multiple `config_variable` blocks are allowed with the structure below.
        """
        return pulumi.get(self, "config_variables")
    @config_variables.setter
    def config_variables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SyntheticsTestConfigVariableArgs']]]]):
        pulumi.set(self, "config_variables", value)
    @property
    @pulumi.getter(name="deviceIds")
    def device_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Required if `type = "browser"`. Array with the different device IDs used to run the test. Valid values are `laptop_large`, `tablet`, `mobile_small`, `chrome.laptop_large`, `chrome.tablet`, `chrome.mobile_small`, `firefox.laptop_large`, `firefox.tablet`, `firefox.mobile_small`, `edge.laptop_large`, `edge.tablet`, `edge.mobile_small`.
        """
        return pulumi.get(self, "device_ids")
    @device_ids.setter
    def device_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "device_ids", value)
    @property
    @pulumi.getter
    def locations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Array of locations used to run the test. Refer to [Datadog documentation](https://docs.datadoghq.com/synthetics/api_test/#request) for available locations (e.g. `aws:eu-central-1`).
        """
        return pulumi.get(self, "locations")
    @locations.setter
    def locations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "locations", value)
    @property
    @pulumi.getter
    def message(self) -> Optional[pulumi.Input[str]]:
        """
        A message to include with notifications for this synthetics test. Email notifications can be sent to specific users by using the same `@username` notation as events.
        """
        return pulumi.get(self, "message")
    @message.setter
    def message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "message", value)
    @property
    @pulumi.getter(name="monitorId")
    def monitor_id(self) -> Optional[pulumi.Input[int]]:
        """
        ID of the monitor associated with the Datadog synthetics test.
        """
        return pulumi.get(self, "monitor_id")
    @monitor_id.setter
    def monitor_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "monitor_id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of Datadog synthetics test.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    # NOTE(review): the upstream provider schema supplies no description for
    # options_list, hence no docstring on this getter.
    @property
    @pulumi.getter(name="optionsList")
    def options_list(self) -> Optional[pulumi.Input['SyntheticsTestOptionsListArgs']]:
        return pulumi.get(self, "options_list")
    @options_list.setter
    def options_list(self, value: Optional[pulumi.Input['SyntheticsTestOptionsListArgs']]):
        pulumi.set(self, "options_list", value)
    @property
    @pulumi.getter(name="requestBasicauth")
    def request_basicauth(self) -> Optional[pulumi.Input['SyntheticsTestRequestBasicauthArgs']]:
        """
        The HTTP basic authentication credentials. Exactly one nested block is allowed with the structure below.
        """
        return pulumi.get(self, "request_basicauth")
    @request_basicauth.setter
    def request_basicauth(self, value: Optional[pulumi.Input['SyntheticsTestRequestBasicauthArgs']]):
        pulumi.set(self, "request_basicauth", value)
    @property
    @pulumi.getter(name="requestClientCertificate")
    def request_client_certificate(self) -> Optional[pulumi.Input['SyntheticsTestRequestClientCertificateArgs']]:
        """
        Client certificate to use when performing the test request. Exactly one nested block is allowed with the structure below.
        """
        return pulumi.get(self, "request_client_certificate")
    @request_client_certificate.setter
    def request_client_certificate(self, value: Optional[pulumi.Input['SyntheticsTestRequestClientCertificateArgs']]):
        pulumi.set(self, "request_client_certificate", value)
    @property
    @pulumi.getter(name="requestDefinition")
    def request_definition(self) -> Optional[pulumi.Input['SyntheticsTestRequestDefinitionArgs']]:
        """
        Required if `type = "api"`. The synthetics test request.
        """
        return pulumi.get(self, "request_definition")
    @request_definition.setter
    def request_definition(self, value: Optional[pulumi.Input['SyntheticsTestRequestDefinitionArgs']]):
        pulumi.set(self, "request_definition", value)
    @property
    @pulumi.getter(name="requestHeaders")
    def request_headers(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Header name and value map.
        """
        return pulumi.get(self, "request_headers")
    @request_headers.setter
    def request_headers(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "request_headers", value)
    @property
    @pulumi.getter(name="requestQuery")
    def request_query(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Query arguments name and value map.
        """
        return pulumi.get(self, "request_query")
    @request_query.setter
    def request_query(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "request_query", value)
    @property
    @pulumi.getter(name="setCookie")
    def set_cookie(self) -> Optional[pulumi.Input[str]]:
        """
        Cookies to be used for a browser test request, using the [Set-Cookie](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie) syntax.
        """
        return pulumi.get(self, "set_cookie")
    @set_cookie.setter
    def set_cookie(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "set_cookie", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        Define whether you want to start (`live`) or pause (`paused`) a Synthetic test. Valid values are `live`, `paused`.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
    @property
    @pulumi.getter
    def subtype(self) -> Optional[pulumi.Input[str]]:
        """
        The subtype of the Synthetic API test. Defaults to `http`. Valid values are `http`, `ssl`, `tcp`, `dns`, `multi`, `icmp`, `udp`, `websocket`.
        """
        return pulumi.get(self, "subtype")
    @subtype.setter
    def subtype(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subtype", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of tags to associate with your synthetics test. This can help you categorize and filter tests in the manage synthetics page of the UI. Default is an empty list (`[]`).
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Synthetics test type. Valid values are `api`, `browser`.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
class SyntheticsTest(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_steps: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestApiStepArgs']]]]] = None,
assertions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestAssertionArgs']]]]] = None,
browser_steps: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestBrowserStepArgs']]]]] = None,
browser_variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestBrowserVariableArgs']]]]] = None,
config_variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestConfigVariableArgs']]]]] = None,
device_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
message: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
options_list: Optional[pulumi.Input[pulumi.InputType['SyntheticsTestOptionsListArgs']]] = None,
request_basicauth: Optional[pulumi.Input[pulumi.InputType['SyntheticsTestRequestBasicauthArgs']]] = None,
request_client_certificate: Optional[pulumi.Input[pulumi.InputType['SyntheticsTestRequestClientCertificateArgs']]] = None,
request_definition: Optional[pulumi.Input[pulumi.InputType['SyntheticsTestRequestDefinitionArgs']]] = None,
request_headers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
request_query: Optional[pulumi.Input[Mapping[str, Any]]] = None,
set_cookie: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
subtype: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Datadog synthetics test resource. This can be used to create and manage Datadog synthetics test.
#### *Warning*
Starting from version 3.1.0+, the direct usage of global variables in the configuration is deprecated, in favor of
local variables of type `global`. As an example, if you were previously using `{{ GLOBAL_VAR }}` directly in your
configuration, add a `config_variable` of type `global` with the `id` matching the `id` of the global variable `GLOBAL_VAR`, which can be found in the Synthetics UI or from the output of the `SyntheticsGlobalVariable` resource. The name can be chosen freely.
In practice, it means going from (simplified configuration):
```python
import pulumi
```
to
```python
import pulumi
config_variable = {
"name": "LOCAL_VAR",
"id": [your_global_variable_id],
"type": "global",
}
```
which you can now use in your request definition:
```python
import pulumi
```
## Example Usage
```python
import pulumi
import pulumi_datadog as datadog
# Example Usage (Synthetics API test)
# Create a new Datadog Synthetics API/HTTP test on https://www.example.org
test_api = datadog.SyntheticsTest("testApi",
assertions=[datadog.SyntheticsTestAssertionArgs(
operator="is",
target="200",
type="statusCode",
)],
locations=["aws:eu-central-1"],
message="Notify @pagerduty",
name="An API test on example.org",
options_list=datadog.SyntheticsTestOptionsListArgs(
monitor_options=datadog.SyntheticsTestOptionsListMonitorOptionsArgs(
renotify_interval=100,
),
retry=datadog.SyntheticsTestOptionsListRetryArgs(
count=2,
interval=300,
),
tick_every=900,
),
request_definition=datadog.SyntheticsTestRequestDefinitionArgs(
method="GET",
url="https://www.example.org",
),
request_headers={
"Authentication": "Token: 1234566789",
"Content-Type": "application/json",
},
status="live",
subtype="http",
tags=[
"foo:bar",
"foo",
"env:test",
],
type="api")
# Example Usage (Synthetics SSL test)
# Create a new Datadog Synthetics API/SSL test on example.org
test_ssl = datadog.SyntheticsTest("testSsl",
assertions=[datadog.SyntheticsTestAssertionArgs(
operator="isInMoreThan",
target="30",
type="certificate",
)],
locations=["aws:eu-central-1"],
message="Notify @pagerduty",
name="An API test on example.org",
options_list=datadog.SyntheticsTestOptionsListArgs(
accept_self_signed=True,
tick_every=900,
),
request_definition=datadog.SyntheticsTestRequestDefinitionArgs(
host="example.org",
port=443,
),
status="live",
subtype="ssl",
tags=[
"foo:bar",
"foo",
"env:test",
],
type="api")
# Example Usage (Synthetics TCP test)
# Create a new Datadog Synthetics API/TCP test on example.org
test_tcp = datadog.SyntheticsTest("testTcp",
assertions=[datadog.SyntheticsTestAssertionArgs(
operator="lessThan",
target="2000",
type="responseTime",
)],
config_variables=[datadog.SyntheticsTestConfigVariableArgs(
id="76636cd1-82e2-4aeb-9cfe-51366a8198a2",
name="MY_GLOBAL_VAR",
type="global",
)],
locations=["aws:eu-central-1"],
message="Notify @pagerduty",
name="An API test on example.org",
options_list=datadog.SyntheticsTestOptionsListArgs(
tick_every=900,
),
request_definition=datadog.SyntheticsTestRequestDefinitionArgs(
host="example.org",
port=443,
),
status="live",
subtype="tcp",
tags=[
"foo:bar",
"foo",
"env:test",
],
type="api")
# Example Usage (Synthetics DNS test)
# Create a new Datadog Synthetics API/DNS test on example.org
test_dns = datadog.SyntheticsTest("testDns",
assertions=[datadog.SyntheticsTestAssertionArgs(
operator="is",
property="A",
target="0.0.0.0",
type="recordSome",
)],
locations=["aws:eu-central-1"],
message="Notify @pagerduty",
name="An API test on example.org",
options_list=datadog.SyntheticsTestOptionsListArgs(
tick_every=900,
),
request_definition=datadog.SyntheticsTestRequestDefinitionArgs(
host="example.org",
),
status="live",
subtype="dns",
tags=[
"foo:bar",
"foo",
"env:test",
],
type="api")
# Example Usage (Synthetics Multistep API test)
# Create a new Datadog Synthetics Multistep API test
test = datadog.SyntheticsTest("test",
api_steps=[
datadog.SyntheticsTestApiStepArgs(
assertions=[datadog.SyntheticsTestApiStepAssertionArgs(
operator="is",
target="200",
type="statusCode",
)],
name="An API test on example.org",
request_definition=datadog.SyntheticsTestApiStepRequestDefinitionArgs(
method="GET",
url="https://example.org",
),
request_headers={
"Authentication": "Token: 1234566789",
"Content-Type": "application/json",
},
subtype="http",
),
datadog.SyntheticsTestApiStepArgs(
assertions=[datadog.SyntheticsTestApiStepAssertionArgs(
operator="is",
target="200",
type="statusCode",
)],
name="An API test on example.org",
request_definition=datadog.SyntheticsTestApiStepRequestDefinitionArgs(
method="GET",
url="http://example.org",
),
subtype="http",
),
],
locations=["aws:eu-central-1"],
name="Multistep API test",
options_list=datadog.SyntheticsTestOptionsListArgs(
accept_self_signed=True,
tick_every=900,
),
status="live",
subtype="multi",
type="api")
# Example Usage (Synthetics Browser test)
# Support for Synthetics Browser test steps is limited (see below)
# Create a new Datadog Synthetics Browser test starting on https://www.example.org
test_browser = datadog.SyntheticsTest("testBrowser",
browser_steps=[datadog.SyntheticsTestBrowserStepArgs(
name="Check current url",
params=datadog.SyntheticsTestBrowserStepParamsArgs(
check="contains",
value="datadoghq",
),
type="assertCurrentUrl",
)],
browser_variables=[
datadog.SyntheticsTestBrowserVariableArgs(
example="597",
name="MY_PATTERN_VAR",
pattern="{{numeric(3)}}",
type="text",
),
datadog.SyntheticsTestBrowserVariableArgs(
example="jd8-afe-ydv.4546132139@synthetics.dtdg.co",
name="MY_EMAIL_VAR",
pattern="jd8-afe-ydv.{{ numeric(10) }}@synthetics.dtdg.co",
type="email",
),
datadog.SyntheticsTestBrowserVariableArgs(
id="76636cd1-82e2-4aeb-9cfe-51366a8198a2",
name="MY_GLOBAL_VAR",
type="global",
),
],
device_ids=["laptop_large"],
locations=["aws:eu-central-1"],
message="Notify @qa",
name="A Browser test on example.org",
options_list=datadog.SyntheticsTestOptionsListArgs(
tick_every=3600,
),
request_definition=datadog.SyntheticsTestRequestDefinitionArgs(
method="GET",
url="https://app.datadoghq.com",
),
status="paused",
tags=[],
type="browser")
```
## Import
# Synthetics tests can be imported using their public string ID, e.g.
```sh
$ pulumi import datadog:index/syntheticsTest:SyntheticsTest fizz abc-123-xyz
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestApiStepArgs']]]] api_steps: Steps for multistep api tests
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestAssertionArgs']]]] assertions: Assertions used for the test. Multiple `assertion` blocks are allowed with the structure below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestBrowserStepArgs']]]] browser_steps: Steps for browser tests.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestBrowserVariableArgs']]]] browser_variables: Variables used for a browser test steps. Multiple `variable` blocks are allowed with the structure below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestConfigVariableArgs']]]] config_variables: Variables used for the test configuration. Multiple `config_variable` blocks are allowed with the structure below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] device_ids: Required if `type = "browser"`. Array with the different device IDs used to run the test. Valid values are `laptop_large`, `tablet`, `mobile_small`, `chrome.laptop_large`, `chrome.tablet`, `chrome.mobile_small`, `firefox.laptop_large`, `firefox.tablet`, `firefox.mobile_small`, `edge.laptop_large`, `edge.tablet`, `edge.mobile_small`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] locations: Array of locations used to run the test. Refer to [Datadog documentation](https://docs.datadoghq.com/synthetics/api_test/#request) for available locations (e.g. `aws:eu-central-1`).
:param pulumi.Input[str] message: A message to include with notifications for this synthetics test. Email notifications can be sent to specific users by using the same `@username` notation as events.
:param pulumi.Input[str] name: Name of Datadog synthetics test.
:param pulumi.Input[pulumi.InputType['SyntheticsTestRequestBasicauthArgs']] request_basicauth: The HTTP basic authentication credentials. Exactly one nested block is allowed with the structure below.
:param pulumi.Input[pulumi.InputType['SyntheticsTestRequestClientCertificateArgs']] request_client_certificate: Client certificate to use when performing the test request. Exactly one nested block is allowed with the structure below.
:param pulumi.Input[pulumi.InputType['SyntheticsTestRequestDefinitionArgs']] request_definition: Required if `type = "api"`. The synthetics test request.
:param pulumi.Input[Mapping[str, Any]] request_headers: Header name and value map.
:param pulumi.Input[Mapping[str, Any]] request_query: Query arguments name and value map.
:param pulumi.Input[str] set_cookie: Cookies to be used for a browser test request, using the [Set-Cookie](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie) syntax.
:param pulumi.Input[str] status: Define whether you want to start (`live`) or pause (`paused`) a Synthetic test. Valid values are `live`, `paused`.
:param pulumi.Input[str] subtype: The subtype of the Synthetic API test. Defaults to `http`. Valid values are `http`, `ssl`, `tcp`, `dns`, `multi`, `icmp`, `udp`, `websocket`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list of tags to associate with your synthetics test. This can help you categorize and filter tests in the manage synthetics page of the UI. Default is an empty list (`[]`).
:param pulumi.Input[str] type: Synthetics test type. Valid values are `api`, `browser`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SyntheticsTestArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Datadog synthetics test resource. This can be used to create and manage Datadog synthetics test.
#### *Warning*
Starting from version 3.1.0+, the direct usage of global variables in the configuration is deprecated, in favor of
local variables of type `global`. As an example, if you were previously using `{{ GLOBAL_VAR }}` directly in your
configuration, add a `config_variable` of type `global` with the `id` matching the `id` of the global variable `GLOBAL_VAR`, which can be found in the Synthetics UI or from the output of the `SyntheticsGlobalVariable` resource. The name can be chosen freely.
In practice, it means going from (simplified configuration):
```python
import pulumi
```
to
```python
import pulumi
config_variable = {
"name": "LOCAL_VAR",
"id": [your_global_variable_id],
"type": "global",
}
```
which you can now use in your request definition:
```python
import pulumi
```
## Example Usage
```python
import pulumi
import pulumi_datadog as datadog
# Example Usage (Synthetics API test)
# Create a new Datadog Synthetics API/HTTP test on https://www.example.org
test_api = datadog.SyntheticsTest("testApi",
assertions=[datadog.SyntheticsTestAssertionArgs(
operator="is",
target="200",
type="statusCode",
)],
locations=["aws:eu-central-1"],
message="Notify @pagerduty",
name="An API test on example.org",
options_list=datadog.SyntheticsTestOptionsListArgs(
monitor_options=datadog.SyntheticsTestOptionsListMonitorOptionsArgs(
renotify_interval=100,
),
retry=datadog.SyntheticsTestOptionsListRetryArgs(
count=2,
interval=300,
),
tick_every=900,
),
request_definition=datadog.SyntheticsTestRequestDefinitionArgs(
method="GET",
url="https://www.example.org",
),
request_headers={
"Authentication": "Token: 1234566789",
"Content-Type": "application/json",
},
status="live",
subtype="http",
tags=[
"foo:bar",
"foo",
"env:test",
],
type="api")
# Example Usage (Synthetics SSL test)
# Create a new Datadog Synthetics API/SSL test on example.org
test_ssl = datadog.SyntheticsTest("testSsl",
assertions=[datadog.SyntheticsTestAssertionArgs(
operator="isInMoreThan",
target="30",
type="certificate",
)],
locations=["aws:eu-central-1"],
message="Notify @pagerduty",
name="An API test on example.org",
options_list=datadog.SyntheticsTestOptionsListArgs(
accept_self_signed=True,
tick_every=900,
),
request_definition=datadog.SyntheticsTestRequestDefinitionArgs(
host="example.org",
port=443,
),
status="live",
subtype="ssl",
tags=[
"foo:bar",
"foo",
"env:test",
],
type="api")
# Example Usage (Synthetics TCP test)
# Create a new Datadog Synthetics API/TCP test on example.org
test_tcp = datadog.SyntheticsTest("testTcp",
assertions=[datadog.SyntheticsTestAssertionArgs(
operator="lessThan",
target="2000",
type="responseTime",
)],
config_variables=[datadog.SyntheticsTestConfigVariableArgs(
id="76636cd1-82e2-4aeb-9cfe-51366a8198a2",
name="MY_GLOBAL_VAR",
type="global",
)],
locations=["aws:eu-central-1"],
message="Notify @pagerduty",
name="An API test on example.org",
options_list=datadog.SyntheticsTestOptionsListArgs(
tick_every=900,
),
request_definition=datadog.SyntheticsTestRequestDefinitionArgs(
host="example.org",
port=443,
),
status="live",
subtype="tcp",
tags=[
"foo:bar",
"foo",
"env:test",
],
type="api")
# Example Usage (Synthetics DNS test)
# Create a new Datadog Synthetics API/DNS test on example.org
test_dns = datadog.SyntheticsTest("testDns",
assertions=[datadog.SyntheticsTestAssertionArgs(
operator="is",
property="A",
target="0.0.0.0",
type="recordSome",
)],
locations=["aws:eu-central-1"],
message="Notify @pagerduty",
name="An API test on example.org",
options_list=datadog.SyntheticsTestOptionsListArgs(
tick_every=900,
),
request_definition=datadog.SyntheticsTestRequestDefinitionArgs(
host="example.org",
),
status="live",
subtype="dns",
tags=[
"foo:bar",
"foo",
"env:test",
],
type="api")
# Example Usage (Synthetics Multistep API test)
# Create a new Datadog Synthetics Multistep API test
test = datadog.SyntheticsTest("test",
api_steps=[
datadog.SyntheticsTestApiStepArgs(
assertions=[datadog.SyntheticsTestApiStepAssertionArgs(
operator="is",
target="200",
type="statusCode",
)],
name="An API test on example.org",
request_definition=datadog.SyntheticsTestApiStepRequestDefinitionArgs(
method="GET",
url="https://example.org",
),
request_headers={
"Authentication": "Token: 1234566789",
"Content-Type": "application/json",
},
subtype="http",
),
datadog.SyntheticsTestApiStepArgs(
assertions=[datadog.SyntheticsTestApiStepAssertionArgs(
operator="is",
target="200",
type="statusCode",
)],
name="An API test on example.org",
request_definition=datadog.SyntheticsTestApiStepRequestDefinitionArgs(
method="GET",
url="http://example.org",
),
subtype="http",
),
],
locations=["aws:eu-central-1"],
name="Multistep API test",
options_list=datadog.SyntheticsTestOptionsListArgs(
accept_self_signed=True,
tick_every=900,
),
status="live",
subtype="multi",
type="api")
# Example Usage (Synthetics Browser test)
# Support for Synthetics Browser test steps is limited (see below)
# Create a new Datadog Synthetics Browser test starting on https://www.example.org
test_browser = datadog.SyntheticsTest("testBrowser",
browser_steps=[datadog.SyntheticsTestBrowserStepArgs(
name="Check current url",
params=datadog.SyntheticsTestBrowserStepParamsArgs(
check="contains",
value="datadoghq",
),
type="assertCurrentUrl",
)],
browser_variables=[
datadog.SyntheticsTestBrowserVariableArgs(
example="597",
name="MY_PATTERN_VAR",
pattern="{{numeric(3)}}",
type="text",
),
datadog.SyntheticsTestBrowserVariableArgs(
example="jd8-afe-ydv.4546132139@synthetics.dtdg.co",
name="MY_EMAIL_VAR",
pattern="jd8-afe-ydv.{{ numeric(10) }}@synthetics.dtdg.co",
type="email",
),
datadog.SyntheticsTestBrowserVariableArgs(
id="76636cd1-82e2-4aeb-9cfe-51366a8198a2",
name="MY_GLOBAL_VAR",
type="global",
),
],
device_ids=["laptop_large"],
locations=["aws:eu-central-1"],
message="Notify @qa",
name="A Browser test on example.org",
options_list=datadog.SyntheticsTestOptionsListArgs(
tick_every=3600,
),
request_definition=datadog.SyntheticsTestRequestDefinitionArgs(
method="GET",
url="https://app.datadoghq.com",
),
status="paused",
tags=[],
type="browser")
```
## Import
# Synthetics tests can be imported using their public string ID, e.g.
```sh
$ pulumi import datadog:index/syntheticsTest:SyntheticsTest fizz abc-123-xyz
```
:param str resource_name: The name of the resource.
:param SyntheticsTestArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SyntheticsTestArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_steps: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestApiStepArgs']]]]] = None,
assertions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestAssertionArgs']]]]] = None,
browser_steps: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestBrowserStepArgs']]]]] = None,
browser_variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestBrowserVariableArgs']]]]] = None,
config_variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestConfigVariableArgs']]]]] = None,
device_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
message: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
options_list: Optional[pulumi.Input[pulumi.InputType['SyntheticsTestOptionsListArgs']]] = None,
request_basicauth: Optional[pulumi.Input[pulumi.InputType['SyntheticsTestRequestBasicauthArgs']]] = None,
request_client_certificate: Optional[pulumi.Input[pulumi.InputType['SyntheticsTestRequestClientCertificateArgs']]] = None,
request_definition: Optional[pulumi.Input[pulumi.InputType['SyntheticsTestRequestDefinitionArgs']]] = None,
request_headers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
request_query: Optional[pulumi.Input[Mapping[str, Any]]] = None,
set_cookie: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
subtype: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SyntheticsTestArgs.__new__(SyntheticsTestArgs)
__props__.__dict__["api_steps"] = api_steps
__props__.__dict__["assertions"] = assertions
__props__.__dict__["browser_steps"] = browser_steps
__props__.__dict__["browser_variables"] = browser_variables
__props__.__dict__["config_variables"] = config_variables
__props__.__dict__["device_ids"] = device_ids
if locations is None and not opts.urn:
raise TypeError("Missing required property 'locations'")
__props__.__dict__["locations"] = locations
__props__.__dict__["message"] = message
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
__props__.__dict__["options_list"] = options_list
__props__.__dict__["request_basicauth"] = request_basicauth
__props__.__dict__["request_client_certificate"] = request_client_certificate
__props__.__dict__["request_definition"] = request_definition
__props__.__dict__["request_headers"] = request_headers
__props__.__dict__["request_query"] = request_query
__props__.__dict__["set_cookie"] = set_cookie
if status is None and not opts.urn:
raise TypeError("Missing required property 'status'")
__props__.__dict__["status"] = status
__props__.__dict__["subtype"] = subtype
__props__.__dict__["tags"] = tags
if type is None and not opts.urn:
raise TypeError("Missing required property 'type'")
__props__.__dict__["type"] = type
__props__.__dict__["monitor_id"] = None
super(SyntheticsTest, __self__).__init__(
'datadog:index/syntheticsTest:SyntheticsTest',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
api_steps: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestApiStepArgs']]]]] = None,
assertions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestAssertionArgs']]]]] = None,
browser_steps: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestBrowserStepArgs']]]]] = None,
browser_variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestBrowserVariableArgs']]]]] = None,
config_variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestConfigVariableArgs']]]]] = None,
device_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
message: Optional[pulumi.Input[str]] = None,
monitor_id: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
options_list: Optional[pulumi.Input[pulumi.InputType['SyntheticsTestOptionsListArgs']]] = None,
request_basicauth: Optional[pulumi.Input[pulumi.InputType['SyntheticsTestRequestBasicauthArgs']]] = None,
request_client_certificate: Optional[pulumi.Input[pulumi.InputType['SyntheticsTestRequestClientCertificateArgs']]] = None,
request_definition: Optional[pulumi.Input[pulumi.InputType['SyntheticsTestRequestDefinitionArgs']]] = None,
request_headers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
request_query: Optional[pulumi.Input[Mapping[str, Any]]] = None,
set_cookie: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
subtype: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None) -> 'SyntheticsTest':
"""
Get an existing SyntheticsTest resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestApiStepArgs']]]] api_steps: Steps for multistep api tests
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestAssertionArgs']]]] assertions: Assertions used for the test. Multiple `assertion` blocks are allowed with the structure below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestBrowserStepArgs']]]] browser_steps: Steps for browser tests.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestBrowserVariableArgs']]]] browser_variables: Variables used for a browser test steps. Multiple `variable` blocks are allowed with the structure below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SyntheticsTestConfigVariableArgs']]]] config_variables: Variables used for the test configuration. Multiple `config_variable` blocks are allowed with the structure below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] device_ids: Required if `type = "browser"`. Array with the different device IDs used to run the test. Valid values are `laptop_large`, `tablet`, `mobile_small`, `chrome.laptop_large`, `chrome.tablet`, `chrome.mobile_small`, `firefox.laptop_large`, `firefox.tablet`, `firefox.mobile_small`, `edge.laptop_large`, `edge.tablet`, `edge.mobile_small`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] locations: Array of locations used to run the test. Refer to [Datadog documentation](https://docs.datadoghq.com/synthetics/api_test/#request) for available locations (e.g. `aws:eu-central-1`).
:param pulumi.Input[str] message: A message to include with notifications for this synthetics test. Email notifications can be sent to specific users by using the same `@username` notation as events.
:param pulumi.Input[int] monitor_id: ID of the monitor associated with the Datadog synthetics test.
:param pulumi.Input[str] name: Name of Datadog synthetics test.
:param pulumi.Input[pulumi.InputType['SyntheticsTestRequestBasicauthArgs']] request_basicauth: The HTTP basic authentication credentials. Exactly one nested block is allowed with the structure below.
:param pulumi.Input[pulumi.InputType['SyntheticsTestRequestClientCertificateArgs']] request_client_certificate: Client certificate to use when performing the test request. Exactly one nested block is allowed with the structure below.
:param pulumi.Input[pulumi.InputType['SyntheticsTestRequestDefinitionArgs']] request_definition: Required if `type = "api"`. The synthetics test request.
:param pulumi.Input[Mapping[str, Any]] request_headers: Header name and value map.
:param pulumi.Input[Mapping[str, Any]] request_query: Query arguments name and value map.
:param pulumi.Input[str] set_cookie: Cookies to be used for a browser test request, using the [Set-Cookie](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie) syntax.
:param pulumi.Input[str] status: Define whether you want to start (`live`) or pause (`paused`) a Synthetic test. Valid values are `live`, `paused`.
:param pulumi.Input[str] subtype: The subtype of the Synthetic API test. Defaults to `http`. Valid values are `http`, `ssl`, `tcp`, `dns`, `multi`, `icmp`, `udp`, `websocket`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list of tags to associate with your synthetics test. This can help you categorize and filter tests in the manage synthetics page of the UI. Default is an empty list (`[]`).
:param pulumi.Input[str] type: Synthetics test type. Valid values are `api`, `browser`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SyntheticsTestState.__new__(_SyntheticsTestState)
__props__.__dict__["api_steps"] = api_steps
__props__.__dict__["assertions"] = assertions
__props__.__dict__["browser_steps"] = browser_steps
__props__.__dict__["browser_variables"] = browser_variables
__props__.__dict__["config_variables"] = config_variables
__props__.__dict__["device_ids"] = device_ids
__props__.__dict__["locations"] = locations
__props__.__dict__["message"] = message
__props__.__dict__["monitor_id"] = monitor_id
__props__.__dict__["name"] = name
__props__.__dict__["options_list"] = options_list
__props__.__dict__["request_basicauth"] = request_basicauth
__props__.__dict__["request_client_certificate"] = request_client_certificate
__props__.__dict__["request_definition"] = request_definition
__props__.__dict__["request_headers"] = request_headers
__props__.__dict__["request_query"] = request_query
__props__.__dict__["set_cookie"] = set_cookie
__props__.__dict__["status"] = status
__props__.__dict__["subtype"] = subtype
__props__.__dict__["tags"] = tags
__props__.__dict__["type"] = type
return SyntheticsTest(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiSteps")
def api_steps(self) -> pulumi.Output[Optional[Sequence['outputs.SyntheticsTestApiStep']]]:
"""
Steps for multistep api tests
"""
return pulumi.get(self, "api_steps")
@property
@pulumi.getter
def assertions(self) -> pulumi.Output[Optional[Sequence['outputs.SyntheticsTestAssertion']]]:
"""
Assertions used for the test. Multiple `assertion` blocks are allowed with the structure below.
"""
return pulumi.get(self, "assertions")
@property
@pulumi.getter(name="browserSteps")
def browser_steps(self) -> pulumi.Output[Optional[Sequence['outputs.SyntheticsTestBrowserStep']]]:
"""
Steps for browser tests.
"""
return pulumi.get(self, "browser_steps")
@property
@pulumi.getter(name="browserVariables")
def browser_variables(self) -> pulumi.Output[Optional[Sequence['outputs.SyntheticsTestBrowserVariable']]]:
"""
Variables used for a browser test steps. Multiple `variable` blocks are allowed with the structure below.
"""
return pulumi.get(self, "browser_variables")
@property
@pulumi.getter(name="configVariables")
def config_variables(self) -> pulumi.Output[Optional[Sequence['outputs.SyntheticsTestConfigVariable']]]:
"""
Variables used for the test configuration. Multiple `config_variable` blocks are allowed with the structure below.
"""
return pulumi.get(self, "config_variables")
@property
@pulumi.getter(name="deviceIds")
def device_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Required if `type = "browser"`. Array with the different device IDs used to run the test. Valid values are `laptop_large`, `tablet`, `mobile_small`, `chrome.laptop_large`, `chrome.tablet`, `chrome.mobile_small`, `firefox.laptop_large`, `firefox.tablet`, `firefox.mobile_small`, `edge.laptop_large`, `edge.tablet`, `edge.mobile_small`.
"""
return pulumi.get(self, "device_ids")
@property
@pulumi.getter
def locations(self) -> pulumi.Output[Sequence[str]]:
"""
Array of locations used to run the test. Refer to [Datadog documentation](https://docs.datadoghq.com/synthetics/api_test/#request) for available locations (e.g. `aws:eu-central-1`).
"""
return pulumi.get(self, "locations")
@property
@pulumi.getter
def message(self) -> pulumi.Output[Optional[str]]:
"""
A message to include with notifications for this synthetics test. Email notifications can be sent to specific users by using the same `@username` notation as events.
"""
return pulumi.get(self, "message")
@property
@pulumi.getter(name="monitorId")
def monitor_id(self) -> pulumi.Output[int]:
"""
ID of the monitor associated with the Datadog synthetics test.
"""
return pulumi.get(self, "monitor_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of Datadog synthetics test.
"""
return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="optionsList")
    def options_list(self) -> pulumi.Output[Optional['outputs.SyntheticsTestOptionsList']]:
        """
        Options for the test (scheduling via `tick_every`, retry and monitor
        settings — see the `options_list` constructor argument).
        """
        return pulumi.get(self, "options_list")
@property
@pulumi.getter(name="requestBasicauth")
def request_basicauth(self) -> pulumi.Output[Optional['outputs.SyntheticsTestRequestBasicauth']]:
"""
The HTTP basic authentication credentials. Exactly one nested block is allowed with the structure below.
"""
return pulumi.get(self, "request_basicauth")
@property
@pulumi.getter(name="requestClientCertificate")
def request_client_certificate(self) -> pulumi.Output[Optional['outputs.SyntheticsTestRequestClientCertificate']]:
"""
Client certificate to use when performing the test request. Exactly one nested block is allowed with the structure below.
"""
return pulumi.get(self, "request_client_certificate")
@property
@pulumi.getter(name="requestDefinition")
def request_definition(self) -> pulumi.Output[Optional['outputs.SyntheticsTestRequestDefinition']]:
"""
Required if `type = "api"`. The synthetics test request.
"""
return pulumi.get(self, "request_definition")
@property
@pulumi.getter(name="requestHeaders")
def request_headers(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
Header name and value map.
"""
return pulumi.get(self, "request_headers")
@property
@pulumi.getter(name="requestQuery")
def request_query(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
Query arguments name and value map.
"""
return pulumi.get(self, "request_query")
@property
@pulumi.getter(name="setCookie")
def set_cookie(self) -> pulumi.Output[Optional[str]]:
"""
Cookies to be used for a browser test request, using the [Set-Cookie](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie) syntax.
"""
return pulumi.get(self, "set_cookie")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
Define whether you want to start (`live`) or pause (`paused`) a Synthetic test. Valid values are `live`, `paused`.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def subtype(self) -> pulumi.Output[Optional[str]]:
"""
The subtype of the Synthetic API test. Defaults to `http`. Valid values are `http`, `ssl`, `tcp`, `dns`, `multi`, `icmp`, `udp`, `websocket`.
"""
return pulumi.get(self, "subtype")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of tags to associate with your synthetics test. This can help you categorize and filter tests in the manage synthetics page of the UI. Default is an empty list (`[]`).
"""
return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Synthetics test type. Valid values are `api`, `browser`.
        """
        # Auto-generated accessor: reads the provider-managed output value.
        return pulumi.get(self, "type")
|
# Demonstrate encoding a non-ASCII string: ASCII encoding fails for '\xe2',
# UTF-8 always succeeds.
text = u'hi\xe2'
try:
    encoded = text.encode('ascii')
except UnicodeEncodeError:
    print('except')
else:
    print(encoded)
print(text.encode('utf-8'))
"""
Model
"""
import logging
from boto.exception import JSONResponseError
from boto.dynamodb2.fields import HashKey, RangeKey
from boto.dynamodb2.table import Table
from boto.dynamodb2.types import NUMBER
class Model(object):
    """Holds handles to all DynamoDB tables used by the application.

    Every table name is prefixed with *table_prefix* and joined with '_'.
    """

    def __init__(self, table_prefix):
        """Attach to (or create) each table under *table_prefix*."""
        def make(suffix, schema, read=1, write=1):
            # Build the full table name and delegate to the module helper.
            return _create_table(
                '_'.join([table_prefix, suffix]),
                schema=schema,
                throughput={'read': read, 'write': write},
            )

        # Spot-price history gets higher throughput than the lookup tables.
        self.spot_prices = make(
            'spot_price',
            [HashKey('instance_zone_id'),
             RangeKey('timestamp', data_type=NUMBER)],
            read=5, write=5)
        self.instance_zones = make(
            'instance_zone',
            [HashKey('instance_id'), RangeKey('zone')])
        self.regions = make('region', [HashKey('region')])
        self.product_descriptions = make(
            'product_description', [HashKey('product_description')])
        self.instance_types = make('instance_type', [HashKey('instance_type')])
        self.progress = make('progress', [HashKey('name')])
def _create_table(table_name, schema, throughput):
    """Return a handle to `table_name`, creating the table when it is missing."""
    if not _table_exists(table_name):
        logging.debug('creating table: %s', table_name)
        return Table.create(table_name, schema=schema, throughput=throughput)
    logging.debug('using existing table: %s', table_name)
    return Table(table_name, schema=schema)
def _table_exists(table_name):
    """Check whether a DynamoDB table exists.

    Args:
        table_name: name of the table to probe via ``describe``.

    Returns:
        bool: True if ``describe`` succeeds, False if DynamoDB reports
        ``ResourceNotFoundException``.

    Raises:
        JSONResponseError: for any API error other than
            ``ResourceNotFoundException``. Previously such errors
            (throttling, auth failures, ...) were silently swallowed and
            reported as "table exists", masking real problems.
    """
    table = Table(table_name)
    try:
        table.describe()
    except JSONResponseError as exc:
        if exc.error_code == 'ResourceNotFoundException':
            return False
        # Do not treat unrelated API failures as existence.
        raise
    return True
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action rendre."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
    """Rend les objets donnés au PNJ.
    Cette action est très spécifique. Elle est à utiliser dans un
    évènement 'donne' dans le cas où on souhaite rendre ce que l'on
    a donné au PNJ. Elle peut être utilisé dans d'autres évènements,
    mais les variables 'pnj', 'personnage', 'objet' et 'quantite'
    doivent alors être présentes.
    """

    # NOTE(review): action docstrings are user-facing, in-game help text
    # shown to (French-speaking) builders; they are intentionally kept in
    # French and verbatim.

    # Ask the scripting engine to pass the event's variables to this action
    # (presumably how the 'variables' dict reaches rendre() -- confirm).
    entrer_variables = True

    @classmethod
    def init_types(cls):
        # Register the two accepted call signatures: no argument, or a
        # single string (the verb to use in the message).
        cls.ajouter_types(cls.rendre)
        cls.ajouter_types(cls.rendre, "str")

    @staticmethod
    def rendre(verbe="redonne", variables=None):
        """Rend les objets donnés au PNJ.
        Paramètres à entrer :
          * verbe : le verbe à utiliser (optionnel)
        Cette action suppose que les variables de l'évènement 'donne'
        existent : 'pnj' doit contenir le PNJ auquel on a donné les
        objets, 'personnage' doit contenir le personnage (joueur ou
        non) qui lui a donné les objets, 'objet' doit contenir l'objet
        ou le prototype d'objet donné et 'quantite' doit contenir la
        quantité d'objet donnés.
        L'utiliser est très simple si ces variables existent (ce qui
        est le cas dans l'évènement 'donner').
          rendre
          # Ou bien de façon plus personnalisée
          rendre "donne"
        La seule différence entre les deux syntaxes est le message
        qui sera envoyé au personnage qui reçoit les objets. Dans
        le premier cas, le message sera "${pnj} vous redonne ${objets}.">
        Dans le second, ce sera "${pnj} vous donne ${objets}.
        Cette action est plus rapide qu'un script désigné pour faire
        la même chose. Outre redonner les objets donnés, elle se charge
        des cas un peu plus difficiles à scripter qui se produisent quand :
          * Plusieurs objets sont donnés d'un coup au PNJ ;
          * L'objet en question est de l'argent, donc un objet non unique.
        """
        pnj = variables.get("pnj")
        personnage = variables.get("personnage")
        objet = variables.get("objet")
        quantite = variables.get("quantite")
        # Check that all required event variables were provided.
        if any(v is None for v in (pnj, personnage, objet, quantite)):
            raise ErreurExecution("Des variables n'ont pas été définies " \
                    ": pnj={}, personnage={}, objet={}, quantite={}".format(
                    pnj, personnage, objet, quantite))
        quantite = int(quantite)
        personnage.envoyer("{{}} vous {verbe} {}.".format(objet.get_nom(
                quantite), verbe=verbe), pnj)
        if objet.unique:
            # Unique objects: hand back the actual object instances.
            if quantite == 1:
                objets = {objet}
            else:
                # Collect the most recently received matching objects (same
                # prototype key 'cle') from the NPC's inventory.
                objets = [o for o in pnj.equipement.inventaire if \
                        o.cle == objet.cle]
                objets = list(reversed(objets))[:quantite]
            for objet in objets:
                if objet.contenu:
                    # Detach the object from its current container first;
                    # ignore the case where it was already removed.
                    try:
                        objet.contenu.retirer(objet)
                    except ValueError:
                        pass
                personnage.ramasser_ou_poser(objet)
        else:
            # Non-unique object (e.g. money): operate on quantities rather
            # than on individual instances.
            pnj.retirer(objet, quantite)
            personnage.ramasser_ou_poser(objet, quantite)
|
# -*- coding: utf-8 -*-
"""Artifact attribute containers."""
from plaso.containers import interface
from plaso.containers import manager
from plaso.lib import definitions
class ArtifactAttributeContainer(interface.AttributeContainer):
  """Base class to represent an artifact attribute container.

  Common ancestor of the artifact containers defined in this module, such
  as environment variables, hostnames, paths and user accounts.
  """
class SystemSubConfigurationArtifactAttributeContainer(
    ArtifactAttributeContainer):
  """System sub configuration artifact attribute container."""

  def __init__(self):
    """Initializes a system sub configuration artifact attribute container."""
    super(SystemSubConfigurationArtifactAttributeContainer, self).__init__()
    # Storage-specific identifier of the associated system configuration;
    # kept private and exposed through explicit get/set methods because it
    # needs special handling during serialization.
    self._system_configuration_identifier = None

  def GetSystemConfigurationIdentifier(self):
    """Retrieves the identifier of the associated system configuration.

    The system configuration identifier is a storage specific value that
    requires special handling during serialization.

    Returns:
      AttributeContainerIdentifier: system configuration identifier or None
          when not set.
    """
    return self._system_configuration_identifier

  def SetSystemConfigurationIdentifier(self, system_configuration_identifier):
    """Sets the identifier of the associated system configuration.

    The system configuration identifier is a storage specific value that
    requires special handling during serialization.

    Args:
      system_configuration_identifier (AttributeContainerIdentifier): system
          configuration identifier.
    """
    self._system_configuration_identifier = system_configuration_identifier
class EnvironmentVariableArtifact(ArtifactAttributeContainer):
  """Environment variable artifact attribute container.

  Also see:
    https://en.wikipedia.org/wiki/Environment_variable

  Attributes:
    case_sensitive (bool): True if environment variable name is case sensitive.
    name (str): environment variable name such as "SystemRoot" as in
        "%SystemRoot%" or "HOME" as in "$HOME".
    value (str): environment variable value such as "C:\\Windows" or
        "/home/user".
  """

  # Unique type indicator used when registering with the containers manager.
  CONTAINER_TYPE = 'environment_variable'

  def __init__(self, case_sensitive=True, name=None, value=None):
    """Initializes an environment variable artifact.

    Args:
      case_sensitive (Optional[bool]): True if environment variable name
          is case sensitive.
      name (Optional[str]): environment variable name.
      value (Optional[str]): environment variable value.
    """
    super(EnvironmentVariableArtifact, self).__init__()
    self.case_sensitive = case_sensitive
    self.name = name
    self.value = value
class HostnameArtifact(ArtifactAttributeContainer):
  """Hostname artifact attribute container.

  Also see:
    https://en.wikipedia.org/wiki/Hostname
    Cybox / Stix Hostname Object

  Attributes:
    name (str): name of the host according to the naming schema.
    schema (str): naming schema such as "DNS", "NIS", "SMB/NetBIOS".
  """

  # Unique type indicator used when registering with the containers manager.
  CONTAINER_TYPE = 'hostname'

  def __init__(self, name=None, schema='DNS'):
    """Initializes a hostname artifact.

    Args:
      name (Optional[str]): name of the host according to the naming schema.
      schema (Optional[str]): naming schema. Defaults to "DNS".
    """
    super(HostnameArtifact, self).__init__()
    self.name = name
    self.schema = schema
class OperatingSystemArtifact(ArtifactAttributeContainer):
  """Operating system artifact attribute container.

  Attributes:
    family (str): operating system family name, such as "Linux", "MacOS"
        or "Windows", defined in definitions.OPERATING_SYSTEM_FAMILIES.
        This value is used to programmatically link a parser preset to an
        operating system and therefore must be one of predefined values.
    name (str): operating system name, such as "macOS Mojave" or "Windows XP".
        This value is used to programmatically link a parser preset to an
        operating system and therefore must be one of predefined values.
    product (str): product information, such as "macOS Mojave" or "Windows
        Professional XP". This value is typically obtained from the source
        data.
    version (str): version, such as "10.14.1" or "5.1". This value is
        typically obtained from the source data.
  """

  CONTAINER_TYPE = 'operating_system'

  _DEFAULT_FAMILY_AND_VERSION = (
      definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN, (0, 0))

  _FAMILY_AND_VERSION_PER_NAME = {
      'Windows 2000': (definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (5, 0)),
      'Windows 2003': (definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (5, 2)),
      'Windows 2003 R2': (
          definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (5, 2)),
      'Windows 2008': (definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (6, 0)),
      'Windows 2008 R2': (
          definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (6, 1)),
      'Windows 2012': (definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (6, 2)),
      'Windows 2012 R2': (
          definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (6, 3)),
      'Windows 2016': (definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (10, 0)),
      'Windows 2019': (definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (10, 0)),
      'Windows 7': (definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (6, 1)),
      'Windows 8': (definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (6, 2)),
      'Windows 8.1': (definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (6, 3)),
      'Windows 10': (definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (10, 0)),
      'Windows Vista': (definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (6, 0)),
      'Windows XP': (definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT, (5, 1))}

  def __init__(self, family=None, product=None, version=None):
    """Initializes an operating system artifact.

    Args:
      family (Optional[str]): operating system family name, such as "Linux",
          "MacOS" or "Windows", defined in
          definitions.OPERATING_SYSTEM_FAMILIES. This value is used to
          programmatically link a parser preset to an operating system and
          therefore must be one of predefined values.
      product (Optional[str]): product information, such as "macOS Mojave" or
          "Windows Professional XP". This value is typically obtained from the
          source data.
      version (Optional[str]): version, such as "10.14.1" or "5.1". This value
          is typically obtained from the source data.
    """
    super(OperatingSystemArtifact, self).__init__()
    self.family = family
    self.name = None
    self.product = product
    self.version = version

    if product:
      self.name = self._GetNameFromProduct()

  @property
  def version_tuple(self):
    """tuple[int]: version tuple or None if version is not set or invalid."""
    try:
      # pylint: disable=consider-using-generator
      return tuple([int(digit, 10) for digit in self.version.split('.')])
    except (AttributeError, TypeError, ValueError):
      return None

  def _GetNameFromProduct(self):
    """Determines the predefined operating system name from the product.

    Returns:
      str: operating system name, such as "macOS Mojave" or "Windows XP" or
          None if the name cannot be determined. This value is used to
          programmatically link a parser preset to an operating system and
          therefore must be one of predefined values.
    """
    product = self.product or ''
    product = product.split(' ')
    product_lower_case = [segment.lower() for segment in product]
    number_of_segments = len(product)

    if 'windows' in product_lower_case:
      segment_index = product_lower_case.index('windows') + 1
      # Guard against "windows" being the last segment; previously products
      # such as "Microsoft Windows" raised IndexError here.
      if (segment_index < number_of_segments and
          product_lower_case[segment_index] in ('(r)', 'server', 'web')):
        segment_index += 1

      if segment_index < number_of_segments:
        # Check if the version has a suffix.
        suffix_segment_index = segment_index + 1
        if (suffix_segment_index < number_of_segments and
            product_lower_case[suffix_segment_index] == 'r2'):
          return 'Windows {0:s} R2'.format(product[segment_index])

        return 'Windows {0:s}'.format(product[segment_index])

    return None

  def IsEquivalent(self, other):
    """Determines if 2 operating system artifacts are equivalent.

    This function compares the operating systems based in order of:
    * name derived from product
    * family and version
    * family

    Args:
      other (OperatingSystemArtifact): operating system artifact attribute
          container to compare with.

    Returns:
      bool: True if the operating systems are considered equivalent, False if
          the most specific criteria do no match, or no criteria are available.
    """
    if self.name and other.name:
      return self.name == other.name

    if self.name:
      self_family, self_version_tuple = self._FAMILY_AND_VERSION_PER_NAME.get(
          self.name, self._DEFAULT_FAMILY_AND_VERSION)
      return (
          self_family == other.family and
          self_version_tuple == other.version_tuple)

    if self.family and self.version:
      if other.name:
        other_family, other_version_tuple = (
            self._FAMILY_AND_VERSION_PER_NAME.get(
                other.name, self._DEFAULT_FAMILY_AND_VERSION))
      else:
        other_family = other.family
        other_version_tuple = other.version_tuple

      return (
          self.family == other_family and
          self.version_tuple == other_version_tuple)

    if self.family:
      if other.name:
        other_family, _ = self._FAMILY_AND_VERSION_PER_NAME.get(
            other.name, self._DEFAULT_FAMILY_AND_VERSION)
      else:
        other_family = other.family

      return self.family == other_family

    return False
class PathArtifact(ArtifactAttributeContainer):
  """Path artifact attribute container.

  Attributes:
    data_stream (str): name of a data stream.
    path_segment_separator (str): path segment separator.
    path_segments (list[str]): path segments.
  """

  CONTAINER_TYPE = 'path'

  def __init__(self, data_stream=None, path=None, path_segment_separator='/'):
    """Initializes a path artifact.

    Args:
      data_stream (Optional[str]): name of a data stream.
      path (Optional[str]): a path.
      path_segment_separator (Optional[str]): path segment separator.
    """
    super(PathArtifact, self).__init__()
    self.data_stream = data_stream
    self.path_segment_separator = path_segment_separator
    self.path_segments = self._SplitPath(path, path_segment_separator)

  def __eq__(self, other):
    """Determines if the path is equal to other.

    Args:
      other (str): path to compare against.

    Returns:
      bool: True if the path is equal to other, False otherwise or when
          other is not a string.
    """
    if not isinstance(other, str):
      return False
    other_path_segments = self._SplitPath(other, self.path_segment_separator)
    return self.path_segments == other_path_segments

  def __ge__(self, other):
    """Determines if the path is greater than or equal to other.

    Args:
      other (str): path to compare against.

    Returns:
      bool: True if the path is greater than or equal to other.

    Raises:
      ValueError: if other is not an instance of string.
    """
    if not isinstance(other, str):
      raise ValueError('Other not an instance of string.')
    other_path_segments = self._SplitPath(other, self.path_segment_separator)
    return self.path_segments >= other_path_segments

  def __gt__(self, other):
    """Determines if the path is greater than other.

    Args:
      other (str): path to compare against.

    Returns:
      bool: True if the path is greater than other.

    Raises:
      ValueError: if other is not an instance of string.
    """
    if not isinstance(other, str):
      raise ValueError('Other not an instance of string.')
    other_path_segments = self._SplitPath(other, self.path_segment_separator)
    return self.path_segments > other_path_segments

  def __le__(self, other):
    """Determines if the path is less than or equal to other.

    Args:
      other (str): path to compare against.

    Returns:
      bool: True if the path is less than or equal to other.

    Raises:
      ValueError: if other is not an instance of string.
    """
    if not isinstance(other, str):
      raise ValueError('Other not an instance of string.')
    other_path_segments = self._SplitPath(other, self.path_segment_separator)
    return self.path_segments <= other_path_segments

  def __lt__(self, other):
    """Determines if the path is less than other.

    Args:
      other (str): path to compare against.

    Returns:
      bool: True if the path is less than other.

    Raises:
      ValueError: if other is not an instance of string.
    """
    if not isinstance(other, str):
      raise ValueError('Other not an instance of string.')
    other_path_segments = self._SplitPath(other, self.path_segment_separator)
    return self.path_segments < other_path_segments

  def __ne__(self, other):
    """Determines if the path is not equal to other.

    Args:
      other (str): path to compare against.

    Returns:
      bool: True if the path is not equal to other or when other is not
          a string.
    """
    # Fixed: this used to return False for non-string operands, while
    # __eq__ also returns False for them, making "p == x" and "p != x"
    # both False for the same operand. A non-string can never be equal,
    # hence "not equal" must be True.
    if not isinstance(other, str):
      return True
    other_path_segments = self._SplitPath(other, self.path_segment_separator)
    return self.path_segments != other_path_segments

  def _SplitPath(self, path, path_segment_separator):
    """Splits a path.

    Args:
      path (str): a path.
      path_segment_separator (str): path segment separator.

    Returns:
      list[str]: path segments. The first (root) segment is preserved even
          when empty; empty segments elsewhere are dropped.
    """
    path = path or ''
    split_path = path.split(path_segment_separator)

    path_segments = [split_path[0]]
    path_segments.extend(list(filter(None, split_path[1:])))

    return path_segments

  def ContainedIn(self, other):
    """Determines if the path is contained in other.

    Args:
      other (str): path to compare against.

    Returns:
      bool: True if the path is contained in other.
    """
    if isinstance(other, str):
      number_of_path_segments = len(self.path_segments)

      other_path_segments = self._SplitPath(other, self.path_segment_separator)
      number_of_other_path_segments = len(other_path_segments)

      if number_of_path_segments < number_of_other_path_segments:
        # Slide a window of our own length over the other path and look for
        # an exact segment-wise match.
        maximum_compare_length = (
            number_of_other_path_segments - number_of_path_segments + 1)

        for compare_start_index in range(0, maximum_compare_length):
          compare_end_index = compare_start_index + number_of_path_segments
          compare_path_segments = other_path_segments[
              compare_start_index:compare_end_index]
          if self.path_segments == compare_path_segments:
            return True

    return False
class SourceConfigurationArtifact(ArtifactAttributeContainer):
  """Source configuration artifact attribute container.

  The source configuration contains the configuration data of a source
  that is (or going to be) processed such as volume in a storage media
  image or a mounted directory.

  Attributes:
    mount_path (str): path of a "mounted" directory input source.
    path_spec (dfvfs.PathSpec): path specification of the source that is
        processed.
    system_configuration (SystemConfigurationArtifact): system configuration of
        a specific system installation, such as Windows or Linux, detected by
        the pre-processing on the source.
  """

  # Unique type indicator used when registering with the containers manager.
  CONTAINER_TYPE = 'source_configuration'

  def __init__(self, path_spec=None):
    """Initializes a source configuration artifact.

    Args:
      path_spec (Optional[dfvfs.PathSpec]): path specification of the source
          that is processed.
    """
    super(SourceConfigurationArtifact, self).__init__()
    # Only path_spec can be set at construction; mount_path and
    # system_configuration are populated by callers afterwards.
    self.mount_path = None
    self.path_spec = path_spec
    self.system_configuration = None
class SystemConfigurationArtifact(ArtifactAttributeContainer):
  """System configuration artifact attribute container.

  The system configuration contains the configuration data of a specific
  system installation such as Windows or Linux.

  Attributes:
    available_time_zones (list[TimeZone]): available time zones.
    code_page (str): system code page.
    hostname (HostnameArtifact): hostname.
    keyboard_layout (str): keyboard layout.
    operating_system (str): operating system for example "MacOS" or "Windows".
    operating_system_product (str): operating system product for example
        "Windows XP".
    operating_system_version (str): operating system version for example
        "10.9.2" or "8.1".
    time_zone (str): system time zone.
    user_accounts (list[UserAccountArtifact]): user accounts.
  """

  # Unique type indicator used when registering with the containers manager.
  CONTAINER_TYPE = 'system_configuration'

  def __init__(self, code_page=None, time_zone=None):
    """Initializes a system configuration artifact.

    Args:
      code_page (Optional[str]): system code page.
      time_zone (Optional[str]): system time zone.
    """
    super(SystemConfigurationArtifact, self).__init__()
    # Only code_page and time_zone can be set at construction; the other
    # attributes are populated by callers afterwards.
    self.available_time_zones = []
    self.code_page = code_page
    self.hostname = None
    self.keyboard_layout = None
    self.operating_system = None
    self.operating_system_product = None
    self.operating_system_version = None
    self.time_zone = time_zone
    self.user_accounts = []
class TimeZoneArtifact(ArtifactAttributeContainer):
  """Time zone artifact attribute container.

  Attributes:
    localized_name (str): name describing the time zone in localized language
        for example "Greenwich (standaardtijd)".
    mui_form (str): MUI form of the name describing the time zone for example
        "@tzres.dll,-112".
    name (str): name describing the time zone for example "Greenwich Standard
        Time".
    offset (int): time zone offset in number of minutes from UTC.
  """

  # Unique type indicator used when registering with the containers manager.
  CONTAINER_TYPE = 'time_zone'

  def __init__(
      self, localized_name=None, mui_form=None, name=None, offset=None):
    """Initializes a time zone artifact.

    Args:
      localized_name (Optional[str]): name describing the time zone in
          localized language for example "Greenwich (standaardtijd)".
      mui_form (Optional[str]): MUI form of the name describing the time zone
          for example "@tzres.dll,-112".
      name (Optional[str]): name describing the time zone for example
          "Greenwich Standard Time".
      offset (Optional[int]): time zone offset in number of minutes from UTC.
    """
    super(TimeZoneArtifact, self).__init__()
    self.localized_name = localized_name
    self.mui_form = mui_form
    self.name = name
    self.offset = offset
class UserAccountArtifact(ArtifactAttributeContainer):
  """User account artifact attribute container.

  Also see:
    Cybox / Stix User Account Object

  Attributes:
    full_name (str): name describing the user.
    group_identifier (str): identifier of the primary group the user is
        part of.
    identifier (str): user identifier.
    user_directory (str): path of the user (or home or profile) directory.
    username (str): name uniquely identifying the user.
  """

  # Unique type indicator used when registering with the containers manager.
  CONTAINER_TYPE = 'user_account'

  def __init__(
      self, full_name=None, group_identifier=None, identifier=None,
      path_separator='/', user_directory=None, username=None):
    """Initializes a user account artifact.

    Args:
      full_name (Optional[str]): name describing the user.
      group_identifier (Optional[str]): identifier of the primary group
          the user is part of.
      identifier (Optional[str]): user identifier.
      path_separator (Optional[str]): path segment separator.
      user_directory (Optional[str]): path of the user (or home or profile)
          directory.
      username (Optional[str]): name uniquely identifying the user.
    """
    super(UserAccountArtifact, self).__init__()
    # Separator used by GetUserDirectoryPathSegments to split user_directory.
    self._path_separator = path_separator
    self.full_name = full_name
    self.group_identifier = group_identifier
    self.identifier = identifier
    # TODO: add shell.
    self.user_directory = user_directory
    self.username = username

  def GetUserDirectoryPathSegments(self):
    """Retrieves the path segments of the user directory.

    Returns:
      list[str]: path segments of the user directory or an empty list if no
          user directory is set.
    """
    if not self.user_directory:
      return []
    return self.user_directory.split(self._path_separator)
class WindowsEventLogProviderArtifact(
    SystemSubConfigurationArtifactAttributeContainer):
  """Windows Event Log provider artifact attribute container.

  Attributes:
    category_message_files (list[str]): filenames of the category message
        files.
    event_message_files (list[str]): filenames of the event message files.
    log_source (str): Windows Event Log source.
    log_type (str): Windows Event Log type.
    parameter_message_files (list[str]): filenames of the parameter message
        files.
  """

  # Unique type indicator used when registering with the containers manager.
  CONTAINER_TYPE = 'windows_eventlog_provider'

  def __init__(
      self, category_message_files=None, event_message_files=None,
      log_source=None, log_type=None, parameter_message_files=None):
    """Initializes a Windows Event Log provider artifact.

    Args:
      category_message_files (Optional[list[str]]): filenames of the category
          message files.
      event_message_files (Optional[list[str]]): filenames of the event message
          files.
      log_source (Optional[str]): Windows Event Log source.
      log_type (Optional[str]): Windows Event Log type.
      parameter_message_files (Optional[list[str]]): filenames of the parameter
          message files.
    """
    super(WindowsEventLogProviderArtifact, self).__init__()
    self.category_message_files = category_message_files
    self.event_message_files = event_message_files
    self.log_source = log_source
    self.log_type = log_type
    self.parameter_message_files = parameter_message_files
# Register the artifact containers with the containers manager so they can be
# resolved by CONTAINER_TYPE during (de)serialization.
manager.AttributeContainersManager.RegisterAttributeContainers([
    EnvironmentVariableArtifact, HostnameArtifact, OperatingSystemArtifact,
    PathArtifact, SourceConfigurationArtifact, SystemConfigurationArtifact,
    TimeZoneArtifact, UserAccountArtifact, WindowsEventLogProviderArtifact])
|
import unittest
from unittest.mock import patch
import petstore_api
class StubPoolManager(object):
    """Test double mirroring urllib3.PoolManager's constructor contract."""

    # Class-level default so the attribute exists even before instantiation.
    actual_kwargs = None

    def __init__(self, num_pools=10, headers=None, **kwargs):
        """Accept PoolManager's known arguments; record everything extra."""
        self.actual_kwargs = kwargs
class StubProxyManager:
    """Test double mirroring urllib3.ProxyManager's constructor contract."""

    # Class-level default so the attribute exists even before instantiation.
    actual_kwargs = None

    def __init__(self, proxy_url, num_pools=10, headers=None,
                 proxy_headers=None, **kwargs):
        """Accept ProxyManager's known arguments; record everything extra."""
        self.actual_kwargs = kwargs
class TestExtraOptionsForPools(unittest.TestCase):
    """Checks that custom socket options reach the urllib3 managers."""

    def _make_client(self, patch_target, stub_cls, use_proxy=False):
        # Build an ApiClient whose urllib3 manager class is replaced by the
        # given stub so we can inspect the kwargs it received.
        config = petstore_api.Configuration(host="HOST")
        config.socket_options = ["extra", "socket", "options"]
        if use_proxy:
            config.proxy = True
        with patch(patch_target, stub_cls):
            return petstore_api.ApiClient(config)

    def test_socket_options_get_passed_to_pool_manager(self):
        api_client = self._make_client(
            "petstore_api.rest.urllib3.PoolManager", StubPoolManager)
        # urllib3.PoolManager promises to pass socket_options in kwargs
        # to the underlying socket, so asserting our manager got it is a
        # good start.
        assert api_client.rest_client.pool_manager.actual_kwargs[
            "socket_options"] == ["extra", "socket", "options"]

    def test_socket_options_get_passed_to_proxy_manager(self):
        api_client = self._make_client(
            "petstore_api.rest.urllib3.ProxyManager", StubProxyManager,
            use_proxy=True)
        # urllib3.ProxyManager promises to pass socket_options in kwargs
        # to the underlying socket, so asserting our manager got it is a
        # good start.
        assert api_client.rest_client.pool_manager.actual_kwargs[
            "socket_options"] == ["extra", "socket", "options"]
|
# command-line options: a list of (args, kwargs) tuples to add_argument
options = [
    (["hostlist"],
     {"help": "the list of hosts to scan"}),

    (["--thread-num", "-n"],
     {"help": "number of worker threads",
      "metavar": "N",
      "dest": "THREAD_NUM",
      "type": int,
      "default": 200}),

    (["--db-filename", "-db"],
     # NOTE: the former "nargs": 1 was removed -- it made argparse store a
     # one-element list when the flag was given, while the default stayed a
     # plain string, so consumers saw two different types.
     {"help": "the sqlite3 database file to use",
      "metavar": "DB",
      "dest": "DB_FILENAME",
      "default": "data/results.db"}),

    (["--print-freq", "-f"],
     {"help": "progress report frequency",
      "metavar": "FREQ",
      "dest": "PRINT_FREQ",
      "type": int,
      "default": 10}),

    (["-q"],
     {"help": "internal queue size",
      "metavar": "Q_SIZE",
      "dest": "MAX_Q_SIZE",
      "type": int,
      "default": 20}),

    (["--suspend-to"],
     {"help": "where to dump program state when suspending",
      "metavar": "STATE_FILE",
      "dest": "SUSP_FILENAME",
      "default": "data/progstate.dump"}),

    (["--resume-from"],
     {"help": "load state from this file and continue scanning",
      "metavar": "STATE_FILE",
      "dest": "STATE_FILE"}),

    (["--repeat"],
     {"help": "scan continuously",
      "action": "store_true",
      "dest": "REPEAT",
      "default": False}),

    (["--with-delay"],
     {"help": "set delay between repeated scans, in seconds; "
              "this implies --repeat",
      "metavar": "REPEAT_DELAY",
      "dest": "REPEAT_DELAY",
      "type": int,
      "default": 0}),
]
|
"""
python version of SurfStatColormap
"""
# Author: RRC
# License: BSD 3 clause
import numpy as np
def py_SurfStatColormap(map):
    """Colormap function for SurfStatView.

    Usage: SurfStatColormap( map );
    Same as for matlab's colormap function - see help colormap.

    Deprecated: this functionality is now a BrainSpace dependency; calling
    the function always terminates the interpreter with an explanatory
    message (SystemExit).

    Note: the former ``@deprecated("BrainSpace dependency")`` decorator
    referenced a name that was never defined or imported (NameError at
    module import time), and ``sys`` was not imported either; both are
    fixed here.
    """
    import sys  # local import keeps the fix self-contained in this function

    sys.exit("Function py_SurfStatColormap is now a BrainSpace dependency")
|
"""Dataloader helper functions. Synchronize slices for both data and label."""
__all__ = ['split_data', 'split_and_load']
from mxnet import ndarray
def split_data(data, num_slice, batch_axis=0, even_split=True, multiplier=1):
    """Splits an NDArray into `num_slice` slices along `batch_axis`.
    Usually used for data parallelism where each slice is sent
    to one device (i.e. GPU).
    Parameters
    ----------
    data : NDArray
        A batch of data.
    num_slice : int
        Number of desired slices.
    batch_axis : int, default 0
        The axis along which to slice.
    even_split : bool, default True
        Whether to force all slices to have the same number of elements.
        If `True`, an error will be raised when `num_slice` does not evenly
        divide `data.shape[batch_axis]`.
    multiplier : int, default 1
        The batch size has to be the multiples of multiplier
    Returns
    -------
    list of NDArray
        Return value is a list even if `num_slice` is 1.
    """
    size = data.shape[batch_axis]
    if even_split and size % num_slice != 0:
        raise ValueError(
            "data with shape %s cannot be evenly split into %d slices along axis %d. " \
            "Use a batch size that's multiple of %d or set even_split=False to allow " \
            "uneven partitioning of data."%(
                str(data.shape), num_slice, batch_axis, num_slice))
    # Round the per-slice step down to a multiple of `multiplier` so every
    # slice (except possibly the last) keeps the required granularity.
    step = (int(size / multiplier) // num_slice) * multiplier
    # If size < num_slice, make fewer slices
    if not even_split and size < num_slice:
        step = 1
        num_slice = size
    if batch_axis == 0:
        # Fast path: basic indexing works directly on axis 0; the last slice
        # absorbs any remainder.
        slices = [data[i*step:(i+1)*step] if i < num_slice - 1 else data[i*step:size]
                  for i in range(num_slice)]
    elif even_split:
        # Even split on a non-zero axis: let the backend do it in one call.
        slices = ndarray.split(data, num_outputs=num_slice, axis=batch_axis)
    else:
        # Uneven split on a non-zero axis: slice manually, last slice takes
        # the remainder.
        slices = [ndarray.slice_axis(data, batch_axis, i*step, (i+1)*step)
                  if i < num_slice - 1 else
                  ndarray.slice_axis(data, batch_axis, i*step, size)
                  for i in range(num_slice)]
    return slices
def split_and_load(data, ctx_list, batch_axis=0, even_split=True, multiplier=1):
    """Splits an NDArray along `batch_axis` into `len(ctx_list)` slices and
    loads each slice onto the corresponding context in `ctx_list`.

    Parameters
    ----------
    data : NDArray
        A batch of data.
    ctx_list : list of Context
        A list of Contexts.
    batch_axis : int, default 0
        The axis along which to slice.
    even_split : bool, default True
        Whether to force all slices to have the same number of elements.
    multiplier : int, default 1
        The batch size has to be the multiples of channel multiplier
    Returns
    -------
    list of NDArray
        Each corresponds to a context in `ctx_list`.
    """
    # Promote raw array-likes to NDArray on the first context.
    if not isinstance(data, ndarray.NDArray):
        data = ndarray.array(data, ctx=ctx_list[0])
    # Single context: nothing to split.
    if len(ctx_list) == 1:
        return [data.as_in_context(ctx_list[0])]
    parts = split_data(data, len(ctx_list), batch_axis, even_split, multiplier)
    loaded = []
    for part, ctx in zip(parts, ctx_list):
        loaded.append(part.as_in_context(ctx))
    return loaded
|
# -*- coding: utf-8 -*-
"""HydraTK message router
.. module:: core.messagerouter
:platform: Unix
:synopsis: HydraTK message router
.. moduleauthor:: Petr Czaderna <pc@hydratk.org>
"""
import os
from hydratk.core import const, message
from hydratk.lib.exceptions.inputerror import InputError
import zmq
# Error codes passed to InputError.
ERROR_ROUTER_ID_EXISTS = 1  # An existing router id
ERROR_SERVICE_ID_EXISTS = 10  # An existing service id
ERROR_SERVICE_INVALID_TRANSPORT_TYPE = 11  # Invalid transport type

# Supported service transports.
SERVICE_TRANSPORT_TYPE_ZMQ_IPC = 1  # ZeroMQ IPC
SERVICE_TRANSPORT_TYPE_ZMQ_TCP = 2  # ZeroMQ TCP

# Queue actions accepted by get_queue().
MESSAGE_QUEUE_ACTION_BIND = 1
MESSAGE_QUEUE_ACTION_CONNECT = 2


class MessageRouter():
    """Class MessageRouter

    Keeps a registry of named services and hands out ZeroMQ sockets
    bound or connected to a service's transport address.
    """

    def __init__(self, id):
        """Class constructor

        Called when object is initialized

        Args:
            id (str): message router id

        Returns:
            void

        Raises:
            error: TypeError
        """
        from hydratk.core.masterhead import MasterHead
        # BUGFIX: these used to be mutable *class* attributes, which made
        # every MessageRouter instance share a single service registry.
        self._service_list = {}
        self._id = id
        self._trn = MasterHead.get_head().get_translator()

    def register_service(self, id, transport_type, options):
        """Method will add router service identificator using specified parameters

        Args:
            id (str): service identifier, must be non-empty and not yet registered
            transport_type (int): supported transport type, currently only IPC and TCP is supported
            options (dict): transport_type supported options (e.g. 'address')

        Returns:
            bool: True

        Raises:
            error: InputError
        """
        if id != '' and id not in self._service_list:
            if transport_type in (SERVICE_TRANSPORT_TYPE_ZMQ_IPC, SERVICE_TRANSPORT_TYPE_ZMQ_TCP):
                self._service_list[id] = {
                    'transport_type': transport_type,
                    'active': False,  # flips to True after the first bind
                    'options': options,
                }
            else:
                raise InputError(ERROR_SERVICE_INVALID_TRANSPORT_TYPE, id, self._trn.msg(
                    'htk_mrouter_sid_invalid_tt', transport_type))
        else:
            raise InputError(
                ERROR_SERVICE_ID_EXISTS, id, self._trn.msg('htk_mrouter_sid_exists', id))
        return True

    def get_queue(self, service_id, action, options=None):
        """Method will return a new instance of queue object for specified service_id

        Args:
            service_id (str): service identifier
            action (int): MESSAGE_QUEUE_ACTION_BIND or MESSAGE_QUEUE_ACTION_CONNECT
            options (dict): queue type optional settings, must contain 'socket_type'

        Returns:
            obj: socket, or False when the service_id is unknown

        Raises:
            error: Exception when binding a service that is already active
        """
        from hydratk.lib.debugging.simpledebug import dmsg
        # BUGFIX: mutable default argument ({}) replaced with None sentinel.
        options = {} if options is None else options
        q = False
        if service_id != '' and service_id in self._service_list:
            service = self._service_list[service_id]
            service_options = service['options']
            addr_prefix = 'ipc://' if (service['transport_type']
                                       == SERVICE_TRANSPORT_TYPE_ZMQ_IPC) else 'tcp://'
            context = zmq.Context()
            q = context.socket(options['socket_type'])
            address = addr_prefix + service_options['address']
            if action == MESSAGE_QUEUE_ACTION_BIND:
                if not service['active']:
                    if service['transport_type'] == SERVICE_TRANSPORT_TYPE_ZMQ_IPC:
                        # Ensure the IPC socket's directory exists before binding.
                        file_path = os.path.dirname(service_options['address'])
                        if not os.path.exists(file_path):
                            os.makedirs(file_path)
                            # TODO set optimal default directory permission
                    dmsg("Binding to message queue {0} : socket type {1}".format(
                        address, options['socket_type']))
                    q.bind(address)
                    # 'service' is the registry dict itself, so mutating it in
                    # place updates self._service_list too.
                    service['active'] = True
                else:
                    raise Exception(
                        "Service queue is active use MESSAGE_QUEUE_ACTION_CONNECT instead")
            elif action == MESSAGE_QUEUE_ACTION_CONNECT:
                q.connect(address)
                dmsg("Connecting to message queue {0} : socket type {1}".format(
                    address, options['socket_type']))
            else:
                pass  # TODO report invalid action instead of silently ignoring it
        return q

    def get_service_address(self, service_id):
        """Method gets service address

        Args:
            service_id (str): service identifier

        Returns:
            str: address
        """
        service = self._service_list[service_id]
        service_options = service['options']
        return service_options['address']
|
class Solution:
    def tribonacci(self, n: int) -> int:
        """Return the n-th Tribonacci number (T0=0, T1=1, T2=1)."""
        # Roll three running values instead of materializing the sequence.
        a, b, c = 0, 1, 1
        for _ in range(n):
            a, b, c = b, c, a + b + c
        return a
|
class Dimensao:
    """Simple holder for a dimension: a name, its routes, and an active flag."""

    def __init__(self):
        self._nome = None   # dimension name, unset until provided
        self._rotas = None  # associated routes, unset until provided
        self._ativo = True  # dimensions start out active

    def get_nome(self):
        return self._nome

    def set_nome(self, nome):
        self._nome = nome

    def get_rotas(self):
        return self._rotas

    def set_rotas(self, rotas):
        self._rotas = rotas

    def get_ativo(self):
        return self._ativo

    def set_ativo(self, ativo):
        self._ativo = ativo
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.