content
stringlengths
5
1.05M
# spinnman imports
from multiprocessing.pool import ThreadPool
from spinnman.connections.udp_packet_connections.\
    udp_eieio_connection import UDPEIEIOConnection
from spinnman.messages.eieio.command_messages.database_confirmation import \
    DatabaseConfirmation

# front end common imports
from spinn_front_end_common.utilities import constants
from spinn_front_end_common.utilities import exceptions

import logging
import traceback

logger = logging.getLogger(__name__)


class NotificationProtocol(object):
    """ The protocol which hand shakes with external devices about the\
        database and starting execution
    """

    def __init__(self, socket_addresses, wait_for_read_confirmation):
        """
        :param socket_addresses: addresses (listen_port, notify_host_name,
            notify_port_no) of the external applications to notify
        :param wait_for_read_confirmation: if True, wait for every external
            application to confirm it has read the database before the
            simulation is started
        """
        self._socket_addresses = socket_addresses

        # Determines whether to wait for confirmation that the database
        # has been read before starting the simulation
        self._wait_for_read_confirmation = wait_for_read_confirmation

        # Fix: initialise the flag here instead of only assigning it inside
        # _send_read_notification, so that reading it before any notification
        # has been sent cannot raise AttributeError
        self._sent_visualisation_confirmation = False

        # a single worker so the read notification / confirmation exchange
        # runs in the background, one task at a time
        self._wait_pool = ThreadPool(processes=1)
        self._data_base_message_connections = list()
        for socket_address in socket_addresses:
            self._data_base_message_connections.append(UDPEIEIOConnection(
                local_port=socket_address.listen_port,
                remote_host=socket_address.notify_host_name,
                remote_port=socket_address.notify_port_no))

    def wait_for_confirmation(self):
        """ if asked to wait for confirmation, waits for all external systems\
            to confirm that they are configured and have read the database

        :return: None
        """
        logger.info("*** Awaiting for a response from an external source "
                    "to state its ready for the simulation to start ***")
        # joining the pool blocks until the async task started by
        # send_read_notification (which receives the confirmations) finishes
        self._wait_pool.close()
        self._wait_pool.join()

    def send_start_notification(self):
        """ either waits till all sources have confirmed read the database and\
            are configured, and/or just sends the start notification\
            (when the system is executing)

        :return: None
        """
        if self._wait_for_read_confirmation:
            self.wait_for_confirmation()
        eieio_command_message = DatabaseConfirmation()
        for connection in self._data_base_message_connections:
            connection.send_eieio_message(eieio_command_message)

    # noinspection PyPep8
    def send_read_notification(self, database_path):
        """ sends notifications to all devices which have expressed an\
            interest in when the database has been written

        :param database_path: the path to the database file
        :return: None
        """
        # run asynchronously; wait_for_confirmation joins the pool later
        if database_path is not None:
            self._wait_pool.apply_async(self._send_read_notification,
                                        args=[database_path])

    def _send_read_notification(self, database_path):
        """ sends notifications to a list of socket addresses that the\
            database has been written. Message also includes the path to\
            the database

        :param database_path: the path to the database
        :return: None
        """
        # noinspection PyBroadException
        if database_path is not None:
            try:
                self._sent_visualisation_confirmation = True

                # add file path to database into command message.
                number_of_chars = len(database_path)
                if number_of_chars > constants.MAX_DATABASE_PATH_LENGTH:
                    raise exceptions.ConfigurationException(
                        "The file path to the database is too large to be "
                        "transmitted via the command packet, "
                        "please set the file path manually and "
                        "set the .cfg parameter [Database] send_file_path "
                        "to False")
                eieio_command_message = DatabaseConfirmation(database_path)

                # Send command and wait for response
                logger.info(
                    "*** Notifying external sources that the database is "
                    "ready for reading ***")

                # noinspection PyBroadException
                try:
                    for connection in self._data_base_message_connections:
                        connection.send_eieio_message(eieio_command_message)

                    # if the system needs to wait, try receiving a packet back
                    if self._wait_for_read_confirmation:
                        for connection in self._data_base_message_connections:
                            connection.receive_eieio_message()
                        logger.info(
                            "*** Confirmation received, continuing ***")
                except Exception:
                    # best-effort: the external listener may have gone away
                    logger.warning("*** Failed to notify external application"
                                   " about the database - continuing ***")
            except Exception:
                traceback.print_exc()

    def close(self):
        """ Closes the thread pool

        :return: None
        """
        self._wait_pool.close()
#!/usr/bin/env python3
import os, glob, itertools


def add_line_prefix_to_file(fname, out_name, line_transform, validate):
    """Copy fname to out_name line by line, keeping only lines accepted by
    validate(line) and writing line_transform(line) for each kept line."""
    with open(fname) as f_in, open(out_name, 'w') as f_out:
        print('Write:', fname, '->', out_name)
        for line in f_in:
            line = line.strip()
            if validate(line):
                f_out.write(line_transform(line) + '\n')


def find_files(f_pattern):
    """Return the sorted list of regular files matching the glob pattern."""
    return sorted(f for f in glob.glob(f_pattern) if os.path.isfile(f))


def add_line_prefix_to_matches(f_pattern, out_dir, fname_transform,
                               line_transform, validate):
    """Apply add_line_prefix_to_file to every file matching f_pattern,
    writing each result into out_dir under fname_transform(basename)."""
    file_path = None
    for file_path in find_files(f_pattern):
        in_dir, fname = os.path.split(file_path)
        out_name = fname_transform(fname)
        out_path = os.path.join(out_dir, out_name)
        add_line_prefix_to_file(file_path, out_path, line_transform, validate)
    if file_path is None:
        # Fix: the original format string had no "{}" placeholder, so the
        # offending pattern was never shown in the error message
        print('Error: No files matching pattern: "{}"'.format(f_pattern))


def main():
    def fname_transform(fname):
        fbase, fext = os.path.splitext(fname)
        return 'results-' + fbase + '.log'

    def validate(line):
        # keep only lines that parse as a float
        try:
            val = float(line)
            return True
        except ValueError:
            print('Warning: Invalid line: "{}"'.format(line))
            return False

    out_dir = '../results-2018-11-pranav/'

    def run(pattern, params):
        # prepend the parameter description to every data line
        prefix = params + ': 0.1.0 -1 '
        line_transform = lambda line: prefix + line
        add_line_prefix_to_matches(pattern, out_dir, fname_transform,
                                   line_transform, validate)

    # cnx-btb
    # cnx-lin
    # cnx-lin-borrowed-1
    # sc-current
    # sc
    # sc-better-t1
    # sc-better-gates
    # sc-better-t1-and-gates
    # ti-qubit
    # ti-bare
    # ti-dressed
    run('bare-qutrit-*.txt',
        '{"n": 14, "noise": "ti-bare", "circ": "cnx-btb"}')
    run('dressed-qutrit-*.txt',
        '{"n": 14, "noise": "ti-dressed", "circ": "cnx-btb"}')
    run('current-sc-qutrit-*.txt',
        '{"n": 14, "noise": "sc-current", "circ": "cnx-btb"}')
    run('future-better-gates-and-t1-qutrit-*.txt',
        '{"n": 14, "noise": "sc-better-t1-and-gates", "circ": "cnx-btb"}')
    run('future-better-gates-qutrit-*.txt',
        '{"n": 14, "noise": "sc-better-gates", "circ": "cnx-btb"}')
    run('future-better-t1-qutrit-*.txt',
        '{"n": 14, "noise": "sc-better-t1", "circ": "cnx-btb"}')
    run('future-sc-qutrit-*.txt',
        '{"n": 14, "noise": "sc", "circ": "cnx-btb"}')
    # TODO: Fix the file names and add more


if __name__ == '__main__':
    main()
# Desafio: Crie um programa onde o usuário digite uma expressão qualquer que use parênteses.
# Seu aplicativo deverá analisar se a expressão passada está com os parenteses abertos e fechados em ordem correta.


def expressao_valida(expressao):
    """Return True when every '(' is closed by a later ')'.

    Fix: the original compared only the *counts* of '(' and ')', which
    wrongly accepts out-of-order expressions such as ')(' — the challenge
    explicitly asks for the parentheses to be in the correct order.
    """
    abertos = 0
    for caractere in expressao:
        if caractere == '(':
            abertos += 1
        elif caractere == ')':
            abertos -= 1
            # a ')' appeared before its matching '(' — invalid order
            if abertos < 0:
                return False
    # valid only when every opened parenthesis was closed
    return abertos == 0


def main():
    # the original `num = list` assigned the builtin class for no effect
    num = input('Digite a expressão: ')
    if expressao_valida(num):
        print('Sua expressão está válida')
    else:
        print('Sua expressão está errada!')


if __name__ == '__main__':
    main()
import re

from pyramid_debugtoolbar.tbtools import Traceback
from pyramid_debugtoolbar.panels import DebugPanel
from pyramid_debugtoolbar.utils import escape
from pyramid_debugtoolbar.utils import STATIC_PATH
from pyramid_debugtoolbar.utils import ROOT_ROUTE_NAME
from pyramid_debugtoolbar.utils import EXC_ROUTE_NAME

# no-op translation hook; keeps strings marked for later translation
_ = lambda x: x


class TracebackPanel(DebugPanel):
    """Debug-toolbar panel that renders the traceback of the exception
    raised while handling the current request."""

    name = 'Traceback'
    template = 'pyramid_debugtoolbar.panels:templates/traceback.dbtmako'

    def __init__(self, request):
        self.request = request
        self.exc_history = request.exc_history

    def nav_title(self):
        # title shown in the toolbar navigation bar
        return _("Traceback")

    def nav_subtitle(self):
        return ""

    def title(self):
        return _("Traceback")

    def url(self):
        return ""

    @property
    def has_content(self):
        # pdbt_tb is presumably attached to the request only when an
        # exception occurred — confirm against the toolbar's tween
        if hasattr(self.request, 'pdbt_tb'):
            return True
        else:
            return False

    def process_response(self, response):
        """Collect the template variables needed to render the traceback."""
        if self.has_content:
            traceback = self.request.pdbt_tb
            exc = escape(traceback.exception)
            summary = Traceback.render_summary(traceback,
                                               include_title=False,
                                               request=self.request)
            token = self.request.registry.pdtb_token
            url = ''  # self.request.route_url(EXC_ROUTE_NAME, _query=qs)
            evalex = self.exc_history.eval_exc

            self.data = {
                'evalex': evalex and 'true' or 'false',
                'console': 'false',
                'lodgeit_url': None,
                'title': exc,
                'exception': exc,
                'exception_type': escape(traceback.exception_type),
                'summary': summary,
                'plaintext': traceback.plaintext,
                # collapse runs of 2+ dashes in the plaintext variant
                'plaintext_cs': re.sub('-{2,}', '-', traceback.plaintext),
                'traceback_id': traceback.id,
                'token': token,
                'url': url,
            }

    def render_content(self, request):
        return super(TracebackPanel, self).render_content(request)

    def render_vars(self, request):
        # extra variables made available to the panel template
        return {
            'static_path': request.static_url(STATIC_PATH),
            'root_path': request.route_url(ROOT_ROUTE_NAME)
        }
from rest_framework import serializers
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.db import transaction

from .validators import ProtectedGroupsValidator
from .validators import SomeSuperuserLeftValidator


class CreatableSlugRelatedField(serializers.SlugRelatedField):
    """Slug related field that creates the target object on demand
    instead of failing when it does not exist yet."""

    def to_internal_value(self, data):
        try:
            # get_or_create returns (obj, created); only the object is needed
            return self.get_queryset().get_or_create(
                **{self.slug_field: data}
            )[0]
        except serializers.ObjectDoesNotExist:
            self.fail("does_not_exist", slug_name=self.slug_field, value=data)
        except (TypeError, ValueError):
            self.fail("invalid")


class UserSerializer(serializers.ModelSerializer):
    """Serializer for the user model; group membership is expressed by
    group *names* and missing groups are created on the fly."""

    name = serializers.CharField(required=False, allow_blank=True)
    # group names; validated against protected-group / last-superuser rules
    groups = serializers.ListSerializer(
        child=CreatableSlugRelatedField(
            slug_field="name", queryset=Group.objects.all()
        ),
        validators=[ProtectedGroupsValidator(), SomeSuperuserLeftValidator()],
        default=[],
    )
    # NOTE(review): exposed as hasPassword but backed by is_active — confirm
    # that "active" really is the intended proxy for "has a password"
    hasPassword = serializers.BooleanField(source="is_active", read_only=True)
    allowedDatasets = serializers.ListSerializer(
        required=False, read_only=True, source="allowed_datasets",
        child=serializers.CharField()
    )

    class Meta(object):
        model = get_user_model()
        fields = (
            "id", "email", "name",
            "hasPassword", "groups", "allowedDatasets",)

    def run_validation(self, data):
        """Normalize the email (lowercase) and rewrite any group whose name
        matches the email to the normalized form, before validation runs."""
        email = data.get("email")
        if email:
            email = get_user_model().objects.normalize_email(email)
            email = email.lower()
            data["email"] = email
        groups = data.get("groups")
        if groups:
            new_groups = []
            for group in groups:
                # presumably a group named after the user's email is that
                # user's personal group; keep it in normalized form
                if group.lower() == email:
                    new_groups.append(email)
                else:
                    new_groups.append(group)
            data["groups"] = new_groups
        return super().run_validation(data=data)

    def validate(self, data):
        """Reject payloads containing fields unknown to this serializer."""
        unknown_keys = set(self.initial_data.keys()) - set(self.fields.keys())
        if unknown_keys:
            raise serializers.ValidationError(
                "Got unknown fields: {}".format(unknown_keys)
            )
        return super(UserSerializer, self).validate(data)

    @staticmethod
    def _check_groups_exist(groups):
        # sanity check that every requested group name exists in the DB
        if groups:
            db_groups_count = Group.objects.filter(name__in=groups).count()
            assert db_groups_count == len(groups), "Not all groups exists.."

    @staticmethod
    def _update_groups(user, new_groups):
        """Replace the user's non-protected groups with new_groups."""
        with transaction.atomic():
            protected_groups = set([
                group.id for group in user.protected_groups])
            # candidate removals: every current group that is not protected
            to_remove = set()
            for group in user.groups.all():
                if group.id not in protected_groups:
                    to_remove.add(group.id)
            to_add = set()
            for group in new_groups:
                if group.id in to_remove:
                    # already a member — keep it
                    to_remove.remove(group.id)
                else:
                    to_add.add(group.id)
            user.groups.add(*to_add)
            user.groups.remove(*to_remove)

    def update(self, instance, validated_data):
        """Update the user and, inside one transaction, its groups."""
        groups = validated_data.pop("groups", None)
        self._check_groups_exist(groups)
        with transaction.atomic():
            super(UserSerializer, self).update(instance, validated_data)
            if groups:
                db_groups = Group.objects.filter(name__in=groups)
                self._update_groups(instance, db_groups)
        return instance

    def create(self, validated_data):
        """Create the user and, inside one transaction, set its groups."""
        groups = validated_data.pop("groups", None)
        self._check_groups_exist(groups)
        with transaction.atomic():
            instance = super(UserSerializer, self).create(validated_data)
            if groups:
                db_groups = Group.objects.filter(name__in=groups)
                self._update_groups(instance, db_groups)
        return instance


class UserWithoutEmailSerializer(UserSerializer):
    """UserSerializer variant that hides the email field."""

    class Meta(object):
        model = get_user_model()
        fields = tuple(x for x in UserSerializer.Meta.fields if x != "email")


class BulkGroupOperationSerializer(serializers.Serializer):
    """Input-only serializer for bulk group operations on many users."""

    userIds = serializers.ListSerializer(child=serializers.IntegerField())
    groups = serializers.ListSerializer(child=serializers.CharField())

    def create(self, validated_data):
        raise NotImplementedError()

    def to_representation(self, instance):
        raise NotImplementedError()

    def update(self, instance, validated_data):
        raise NotImplementedError()

    def to_internal_value(self, data):
        return super(BulkGroupOperationSerializer, self).to_internal_value(
            data
        )
from peewee import *

# single SQLite database file holding the ISO 639 language-code corpus
db = SqliteDatabase('iso639corpora.db')


class Languages(Model):
    # full language name
    name = CharField()
    # part1/part2/part3 presumably hold the ISO 639-1/-2/-3 codes — confirm
    part1 = CharField(null=True)
    part2 = CharField(null=True)
    part3 = CharField(null=True)
    # free-text field; nullable like the code columns
    text = TextField(null=True)

    class Meta:
        # binds this model to the iso639corpora.db database defined above
        # (the old comment wrongly referenced "people.db")
        database = db
# -*- coding: utf-8 -*-
# Copyright 2018 Carsten Blank
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
QmlGenericStateCircuitBuilder
==============================

.. currentmodule:: dc_qiskit_qml.distance_based.hadamard.state._QmlGenericStateCircuitBuilder

The generic state circuit builder classically computes the necessary quantum state vector (sparse) and will use
a state preparing quantum routine that takes the state vector and creates a circuit. This state preparing quantum
routine must implement :py:class:`QmlSparseVectorFactory`.

.. autosummary::
    :nosignatures:

    QmlGenericStateCircuitBuilder

QmlGenericStateCircuitBuilder
##############################

.. autoclass:: QmlGenericStateCircuitBuilder
    :members:

"""
import logging
from typing import List, Optional

import numpy as np
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from scipy import sparse

from ._QmlStateCircuitBuilder import QmlStateCircuitBuilder
from .sparsevector import QmlSparseVectorStatePreparation

log = logging.getLogger('QmlGenericStateCircuitBuilder')


class RegisterSizes:
    """Plain container for the qubit counts of each register of the circuit."""

    # number of training samples
    count_of_samples: int
    # qubits needed to index the training samples
    index_of_samples_qubits: int
    # qubits needed to index the feature (sample-space) dimensions
    sample_space_dimensions_qubits: int
    # qubits of the ancilla (branch) register
    ancilla_qubits: int
    # qubits needed to encode the class label
    label_qubits: int
    # total number of qubits in the circuit
    total_qubits: int

    def __init__(self, count_of_samples, index_of_samples_qubits,
                 sample_space_dimensions_qubits, ancilla_qubits,
                 label_qubits, total_qubits):
        # type: (int, int, int, int, int, int) -> None
        self.count_of_samples = count_of_samples
        self.index_of_samples_qubits = index_of_samples_qubits
        self.sample_space_dimensions_qubits = sample_space_dimensions_qubits
        self.ancilla_qubits = ancilla_qubits
        self.label_qubits = label_qubits
        self.total_qubits = total_qubits


class QmlGenericStateCircuitBuilder(QmlStateCircuitBuilder):
    """
    From generic training and testing data creates the quantum state vector and applies a quantum algorithm
    to create a circuit.
    """

    def __init__(self, state_preparation):
        # type: (QmlGenericStateCircuitBuilder, QmlSparseVectorStatePreparation) -> None
        """
        Create a new object, use the state preparation routine

        :param state_preparation: The quantum state preparation routine to encode the quantum state
        """
        self.state_preparation = state_preparation
        # cached result of the last assemble_state_vector call, see
        # get_last_state_vector (debugging aid)
        self._last_state_vector = None  # type: sparse.dok_matrix

    @staticmethod
    def get_binary_representation(sample_index, sample_label, entry_index, is_input, register_sizes):
        # type: (int, int, int, bool, RegisterSizes) -> str
        """
        Computes the binary representation of the quantum state as `str` given indices and qubit lengths

        :param sample_index: the training data sample index
        :param sample_label: the training data label
        :param entry_index: the data sample vector index
        :param is_input: True if the we encode the input instead of the training vector
        :param register_sizes: qubits needed for the all registers
        :return: binary representation of which the quantum state being addressed
        """
        sample_index_b = "{0:b}".format(sample_index).zfill(register_sizes.index_of_samples_qubits)
        sample_label_b = "{0:b}".format(sample_label).zfill(register_sizes.label_qubits)
        # the single ancilla qubit separates the input branch ('0') from the
        # training-data branch ('1')
        ancillary_b = '0' if is_input else '1'
        entry_index_b = "{0:b}".format(entry_index).zfill(register_sizes.sample_space_dimensions_qubits)

        # Here we compose the qubit, the ordering will be essential
        # However keep in mind, that the order is LSB
        qubit_composition = [
            sample_label_b,
            entry_index_b,
            sample_index_b,
            ancillary_b
        ]

        return "".join(qubit_composition)

    @staticmethod
    def _get_register_sizes(X_train, y_train):
        # type: (List[sparse.dok_matrix], np.ndarray) -> RegisterSizes
        # Derive the qubit count of every register from the data shapes.
        count_of_samples, sample_space_dimension = len(X_train), X_train[0].get_shape()[0]
        count_of_distinct_classes = len(set(y_train))

        # (n - 1).bit_length() == ceil(log2(n)) for n >= 1
        index_of_samples_qubits_needed = (count_of_samples - 1).bit_length()
        sample_space_dimensions_qubits_needed = (sample_space_dimension - 1).bit_length()
        ancilla_qubits_needed = 1
        # at least one label qubit even for a single class
        label_qubits_needed = (count_of_distinct_classes - 1).bit_length() if count_of_distinct_classes > 1 else 1

        total_qubits_needed = (index_of_samples_qubits_needed + ancilla_qubits_needed
                               + sample_space_dimensions_qubits_needed + label_qubits_needed)

        return RegisterSizes(
            count_of_samples,
            index_of_samples_qubits_needed,
            sample_space_dimensions_qubits_needed,
            ancilla_qubits_needed,
            label_qubits_needed,
            total_qubits_needed
        )

    @staticmethod
    def assemble_state_vector(X_train, y_train, X_input):
        # type: (List[sparse.dok_matrix], any, sparse.dok_matrix) -> sparse.dok_matrix
        """
        This method assembles the state vector for given data. The X data for training (which is a list of
        sparse vectors) is encoded into a sub-space, in the other orthogonal sub-space the same input is
        encoded everywhere. Also, the label is encoded.

        A note: this method is used in conjunction with a generic state preparation, and this may not be
        the most efficient way.

        :param X_train: The training data set
        :param y_train: the training class label data set
        :param X_input: the unclassified input data vector
        :return: The assembled (sparse) state vector
        """
        register_sizes = QmlGenericStateCircuitBuilder._get_register_sizes(X_train, y_train)

        state_vector = sparse.dok_matrix((2 ** register_sizes.total_qubits, 1), dtype=complex)  # type: sparse.dok_matrix
        # overall normalization 1/sqrt(2N); assumes each sample and the input
        # vector are themselves normalized — TODO confirm
        factor = 2 * register_sizes.count_of_samples * 1.0
        for index_sample, (sample, label) in enumerate(zip(X_train, y_train)):
            # training branch (ancilla = 1)
            for (i, j), sample_i in sample.items():
                qubit_state = QmlGenericStateCircuitBuilder.get_binary_representation(
                    index_sample, label, i, is_input=False, register_sizes=register_sizes
                )
                state_index = int(qubit_state, 2)
                log.debug("Sample Entry: %s (%d): %.2f.", qubit_state, state_index, sample_i)
                state_vector[state_index, 0] = sample_i
            # input branch (ancilla = 0): the same input vector is written
            # once per training-sample index
            for (i, j), input_i in X_input.items():
                qubit_state = QmlGenericStateCircuitBuilder.get_binary_representation(
                    index_sample, label, i, is_input=True, register_sizes=register_sizes
                )
                state_index = int(qubit_state, 2)
                log.debug("Input Entry: %s (%d): %.2f.", qubit_state, state_index, input_i)
                state_vector[state_index, 0] = input_i

        state_vector = state_vector * (1 / np.sqrt(factor))
        return state_vector

    def build_circuit(self, circuit_name, X_train, y_train, X_input):
        # type: (QmlGenericStateCircuitBuilder, str, List[sparse.dok_matrix], any, sparse.dok_matrix) -> QuantumCircuit
        """
        Build a circuit that encodes the training (samples/labels) and input data sets into a quantum circuit.

        It does so by iterating through the training data set with labels and constructs upon sample index and
        vector position the to be modified amplitude. The state vector is stored into a sparse matrix of
        shape (n,1) which is stored and can be accessed through :py:func:`get_last_state_vector` for debugging
        purposes.

        Then the routine uses a :py:class:`QmlSparseVectorStatePreparation` routine to encode the calculated
        state vector into a quantum circuit.

        :param circuit_name: The name of the quantum circuit
        :param X_train: The training data set
        :param y_train: the training class label data set
        :param X_input: the unclassified input data vector
        :return: The circuit containing the gates to encode the input data
        """
        log.debug("Preparing state.")
        log.debug("Raw Input Vector: %s" % X_input)

        register_sizes = QmlGenericStateCircuitBuilder._get_register_sizes(X_train, y_train)

        log.info("Qubit map: index=%d, ancillary=%d, feature=%d, label=%d",
                 register_sizes.index_of_samples_qubits,
                 register_sizes.ancilla_qubits,
                 register_sizes.sample_space_dimensions_qubits,
                 register_sizes.label_qubits)

        # quantum registers: ancilla (branch), sample index, feature, label;
        # classical registers for measuring label and branch
        ancilla = QuantumRegister(register_sizes.ancilla_qubits, "a")
        index = QuantumRegister(register_sizes.index_of_samples_qubits, "i")
        data = QuantumRegister(register_sizes.sample_space_dimensions_qubits, "f^S")
        qlabel = QuantumRegister(register_sizes.label_qubits, "l^q")
        clabel = ClassicalRegister(register_sizes.label_qubits, "l^c")
        branch = ClassicalRegister(1, "b")

        qc = QuantumCircuit(ancilla, index, data, qlabel, clabel, branch, name=circuit_name)  # type: QuantumCircuit

        # keep the computed state vector for later inspection
        self._last_state_vector = QmlGenericStateCircuitBuilder.assemble_state_vector(X_train, y_train, X_input)

        return self.state_preparation.prepare_state(qc, self._last_state_vector)

    def is_classifier_branch(self, branch_value):
        # type: (QmlGenericStateCircuitBuilder, int) -> bool
        """
        As each state preparation algorithm uses a unique layout. Here the :py:class:`QmlSparseVectorFactory`
        is asked how the branch for post selection can be identified.

        :param branch_value: The measurement of the branch
        :return: True is the branch is containing the classification, False if not
        """
        return self.state_preparation.is_classifier_branch(branch_value)

    def get_last_state_vector(self):
        # type: (QmlGenericStateCircuitBuilder) -> Optional[sparse.dok_matrix]
        """
        From the last call of :py:func:`build_circuit` the computed (sparse) state vector.

        :return: a sparse vector (shape (n,0)) if present
        """
        return self._last_state_vector
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import os
import math
import torch.nn.functional as nn
from torch.autograd import Variable
import torch
from tensorflow.examples.tutorials.mnist import input_data
from sklearn import manifold


def P(z):
    """Generator forward pass: decode latent batch z into flat MNIST images.

    NOTE(review): the dataset download, hyper-parameters and weight creation
    all live inside this function, so fresh random weights are drawn on every
    call — presumably acceptable for this plotting demo; confirm.
    """
    mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
    mb_size = 64
    Z_dim = 100
    X_dim = mnist.train.images.shape[1]
    y_dim = mnist.train.labels.shape[1]
    h_dim = 128
    c = 0
    lr = 1e-3

    def xavier_init(size):
        # Xavier/Glorot-style init: std = 1 / sqrt(fan_in / 2)
        in_dim = size[0]
        xavier_stddev = 1. / np.sqrt(in_dim / 2.)
        return Variable(torch.randn(*size) * xavier_stddev, requires_grad=True)

    Wzh = xavier_init(size=[Z_dim, h_dim])
    bzh = Variable(torch.zeros(h_dim), requires_grad=True)

    Whx = xavier_init(size=[h_dim, X_dim])
    bhx = Variable(torch.zeros(X_dim), requires_grad=True)

    # z -> hidden (ReLU) -> pixel probabilities (sigmoid)
    h = nn.relu(z @ Wzh + bzh.repeat(z.size(0), 1))
    X = nn.sigmoid(h @ Whx + bhx.repeat(h.size(0), 1))
    return X


def plot_a_numpy_array(a_numpy_array):
    """Show a single grayscale image (flat vector, 3-D stack or 2-D array)."""
    print('a numpy array: ', a_numpy_array.shape)
    if len(a_numpy_array.shape) != 2:
        if len(a_numpy_array.shape) == 3:
            side_size = a_numpy_array.shape[-1]
        else:
            # flat vector: assume it represents a square image
            side_size = int(math.sqrt(a_numpy_array.shape[0]))
        print(a_numpy_array.reshape(side_size, side_size).shape)
        plt.imshow(a_numpy_array.reshape(side_size, side_size), cmap='Greys_r')
        plt.show()
    else:
        plt.imshow(a_numpy_array, cmap='Greys_r')
        plt.show()


def plot_a_numpy_sample(samples, to_save=False, file_name=None):
    """Plot up to 16 28x28 samples on a 4x4 grid; optionally save to out/."""
    fig = plt.figure(figsize=(4, 4))
    gs = gridspec.GridSpec(4, 4)
    gs.update(wspace=0.05, hspace=0.05)
    for i, sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
    if to_save:
        if not os.path.exists('out/'):
            os.makedirs('out/')
        # derive the PNG name from the input file name
        plt.savefig('out/' + file_name.replace('original_np_outs/', '').replace('.out', '') + '.png')
        plt.close(fig)
    else:
        plt.show()


def plot_a_numpy_file(file_name, to_save=False):
    """Load comma-separated samples from file_name and plot them."""
    samples = np.loadtxt(file_name, delimiter=',')
    if to_save:
        plot_a_numpy_sample(samples, to_save=to_save, file_name=file_name)
    else:
        plot_a_numpy_sample(samples, to_save=to_save)


def decode_a_z(an_z):
    """Decode latent vectors with the generator P and plot the first 16."""
    if isinstance(an_z, np.ndarray):
        an_z = Variable(torch.from_numpy(an_z))
    samples = P(an_z).data.numpy()[:16]
    plot_a_numpy_sample(samples)


def decode_a_z_file(an_z_file):
    """Load latent vectors from a comma-separated file and decode them."""
    an_z = np.loadtxt(an_z_file, delimiter=',')
    decode_a_z(an_z)


# plot_a_numpy_file('original_np_outs/original_samples_99000.out')
import sys

from rebus.agent import Agent


@Agent.register
class Search(Agent):
    """Agent that queries the bus for descriptors matching a domain,
    selector prefix and value regex, and prints the matching selectors."""

    _name_ = "search"
    _desc_ = "Output a list of selectors for descriptors that match provided "\
        "domain, selector prefix and value regex"
    _operationmodes_ = ('automatic', )

    @classmethod
    def add_arguments(cls, subparser):
        # CLI options exposed by this agent
        subparser.add_argument("--domain", help="Descriptor domain",
                               default="default")
        subparser.add_argument("selector_prefix", nargs=1,
                               help="Selector prefix")
        subparser.add_argument("value_regex", nargs=1, help="Regex that the "
                               "value has to match (from its beginning)")

    def run(self):
        # nargs=1 positionals arrive as one-element lists, hence the [0]
        matches = self.bus.find_by_value(self, self.config['domain'],
                                         self.config['selector_prefix'][0],
                                         self.config['value_regex'][0])
        if len(matches) == 0:
            sys.stdout.write('No match found.\n')
        for match in matches:
            sys.stdout.write(str(match.selector))
            sys.stdout.write('\n')
import random
import numpy as np
import os
import torch
import h5py
import argparse
import json
import torchvision.transforms as transforms
import sys
sys.path.insert(0, "../")

from tkinter import filedialog
from tkinter import *
from PIL import Image, ImageTk
from torch.autograd import Variable
from dense_coattn.modules import LargeEmbedding
from dense_coattn.model import DCN, DCNWithAns, DCNWithRCNN, DCNWithRCNNAns
from nltk.tokenize import word_tokenize

import nltk
nltk.data.path.append("/ceph/kien/nltk_data")

UNK_WORD = "<unk>"

# resize + tensor conversion + ImageNet mean/std normalization
transform = transforms.Compose([
    transforms.Resize((448, 448)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])


def move_to_cuda(tensors, devices=None):
    """Move a sequence of tensors to the first listed GPU.

    None entries are passed through unchanged; when devices is None or
    empty the input tuple is returned as-is.
    """
    if devices is not None:
        if len(devices) >= 1:
            cuda_tensors = []
            for tensor in tensors:
                if tensor is not None:
                    # Fix: `async` became a reserved keyword in Python 3.7
                    # (SyntaxError); PyTorch renamed the argument to
                    # non_blocking, which has the same meaning
                    cuda_tensors.append(tensor.cuda(devices[0],
                                                    non_blocking=True))
                else:
                    cuda_tensors.append(None)
            return tuple(cuda_tensors)
    return tensors


def get_ques(sentence, word2idx, max_len_ques=14):
    """Tokenize a question into a padded index tensor and its mask.

    Unknown words map to UNK_WORD; questions longer than max_len_ques are
    truncated. Returns (ques, ques_mask), both of shape (1, max_len_ques).
    """
    processed_sen = word_tokenize(str(sentence).lower())
    final_ques = [w if w in word2idx else UNK_WORD for w in processed_sen]

    ques = torch.zeros(1, max_len_ques).long()
    for i, word in enumerate(final_ques):
        if i < max_len_ques:
            ques[0, i] = word2idx[word]
    # index 0 is padding, so the mask is simply "not zero"
    ques_mask = ques.ne(0).float()

    return ques, ques_mask


def get_img(img_path):
    """Load an image and apply the input transform; returns a 1x3x448x448 tensor."""
    return transform(Image.open(img_path).convert("RGB")).unsqueeze(0)


def answer(sample, model, idx2ans, opt):
    """
    Generate answers for testing the model.
    --------------------
    Arguments:
        sample: tuple (img, ques, ques_mask) of input tensors.
        model: our trained network.
        idx2ans: set of possible answers.
        opt: testing option (gpus, top_ans).
    """
    img, ques, ques_mask = sample
    img = Variable(img, volatile=True)
    ques = Variable(ques, volatile=True)
    ques_mask = Variable(ques_mask, volatile=True)

    img, ques, ques_mask = move_to_cuda((img, ques, ques_mask),
                                        devices=opt.gpus)
    ques = model.word_embedded(ques)
    score = model(img, ques, None, ques_mask, is_train=False)
    # keep the opt.top_ans highest-scoring answers, best first
    _, inds = torch.sort(score, dim=1, descending=True)
    answer = [idx2ans[inds.data[0, i]] for i in range(opt.top_ans)]

    return answer


def load_pretrained_model(opt):
    """Load vocabulary info, word embeddings and the pretrained DCN checkpoint.

    Returns (model, idx2ans, word2idx, opt) ready for inference.
    """
    data_info = torch.load(os.path.join(opt.data_path,
                                        "%s_info.pt" % opt.data_name))
    word2idx = data_info["word2idx"]
    idx2word = data_info["idx2word"]
    idx2ans = data_info["idx2ans"]

    print("Building model...")
    word_embedded = LargeEmbedding(len(idx2word), 300, padding_idx=0,
                                   devices=opt.gpus)
    word_embedded.load_pretrained_vectors(opt.word_vectors)

    dict_checkpoint = opt.train_from
    if dict_checkpoint:
        print("Loading model from checkpoint at %s" % dict_checkpoint)
        model = torch.load(dict_checkpoint)

    if len(opt.gpus) >= 1:
        model.cuda(opt.gpus[0])
    model.word_embedded = word_embedded
    model.eval()

    return model, idx2ans, word2idx, opt


class Window(Frame):
    """Tkinter window: upload an image, type a question, show the answer."""

    def __init__(self, master, model, idx2ans, word2idx, opt):
        Frame.__init__(self, master)
        self.master = master
        self.var = StringVar()
        self.img_path = None
        self.model = model
        self.idx2ans = idx2ans
        self.word2idx = word2idx
        self.opt = opt
        self.img_tensor = None
        self.answers = None
        self.img = None
        self.init_window()

    def init_window(self):
        """Build the menu, the question entry box and the answer button."""
        self.master.title("DenseCoAttn demo!")
        self.pack(fill=BOTH, expand=1)
        menu = Menu(self.master)
        self.master.config(menu=menu)

        file = Menu(menu)
        file.add_command(label="Upload", command=self.showImg)
        file.add_command(label="Exit", command=self.client_exit)
        menu.add_cascade(label="File", menu=file)

        text = Label(self, text="Possible answers:")
        text.place(x=600, y=10)

        textbox = Entry(self.master, textvariable=self.var, width=70)
        textbox.focus_set()
        textbox.pack(pady=10, padx=10)

        button = Button(self.master, text="Answer", width=10,
                        command=self.submitQues)
        button.pack()

    def showImg(self, max_size=500):
        """Ask for an image file, cache its tensor and display a scaled preview."""
        img_path = filedialog.askopenfilename(initialdir="./",
                                              title="Select file")
        print(img_path)
        if img_path != "":
            self.img_tensor = transform(
                Image.open(img_path).convert("RGB")).unsqueeze(0)
            print(self.img_tensor.size())
            img_data = Image.open(img_path)

            # scale the preview so its longest side is max_size pixels
            width, height = img_data.size
            if width > height:
                height = height * max_size / width
                width = max_size
            else:
                width = width * max_size / height
                height = max_size
            width = int(width)
            height = int(height)

            render = ImageTk.PhotoImage(img_data.resize((width, height)))
            if self.img is not None:
                self.img.destroy()
            self.img = Label(self, image=render)
            self.img.image = render
            self.img.place(x=0, y=0)

    def submitQues(self):
        """Run the model on the cached image and the typed question."""
        if self.answers is not None:
            self.answers.destroy()
        ques, ques_mask = get_ques(self.var.get(), self.word2idx)
        answers = answer((self.img_tensor, ques, ques_mask), self.model,
                         self.idx2ans, self.opt)
        self.answers = Label(self, text="1. %s" % tuple(answers))
        self.answers.place(x=600, y=50)

    def client_exit(self):
        exit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--top_ans", type=int, default=1)
    parser.add_argument("--gpus", type=int, nargs="+", default=[0])
    parser.add_argument("--data_path", type=str, default="dataset")
    parser.add_argument("--data_name", type=str, default="cocotrainval")
    parser.add_argument("--img_name", type=str, default="cocoimages")
    parser.add_argument("--word_vectors", type=str,
                        default="dataset/glove_840B.pt")
    parser.add_argument("--train_from", default="model/pretrained_dcn.pt")

    args = parser.parse_args()
    params = vars(args)
    print("Parsed input parameters:")
    print(json.dumps(params, indent=2))

    model, idx2ans, word2idx, opt = load_pretrained_model(args)

    root = Tk()
    root.geometry("800x500")
    app = Window(root, model, idx2ans, word2idx, opt)
    root.mainloop()
from decimal import Decimal

from strawberry.utils.debug import pretty_print_graphql_operation


def test_pretty_print(mocker):
    """A query with no variables is printed once, syntax-highlighted."""
    # Intercept print() so the exact colourised output can be asserted.
    mock = mocker.patch("builtins.print")

    pretty_print_graphql_operation("Example", "{ query }", variables={})

    # The \x1b[...m sequences are ANSI colour codes emitted by the
    # highlighter — presumably pygments' terminal formatter; TODO confirm.
    mock.assert_called_with("{ \x1b[38;5;125mquery\x1b[39m }\n")


def test_pretty_print_variables(mocker):
    """Variables are pretty-printed as highlighted JSON after the query."""
    mock = mocker.patch("builtins.print")

    pretty_print_graphql_operation("Example", "{ query }", variables={"example": 1})

    # assert_called_with checks only the LAST call, i.e. the variables dump.
    mock.assert_called_with(
        '{\n \x1b[38;5;28;01m"example"\x1b[39;00m: \x1b[38;5;241m1\x1b[39m\n}\n'
    )


def test_pretty_print_variables_object(mocker):
    """Non-JSON-serializable values (e.g. Decimal) fall back to repr strings."""
    mock = mocker.patch("builtins.print")

    pretty_print_graphql_operation(
        "Example", "{ query }", variables={"example": Decimal(1)}
    )

    mock.assert_called_with(
        '{\n \x1b[38;5;28;01m"example"\x1b[39;00m: '
        "\x1b[38;5;124m\"Decimal('1')\"\x1b[39m\n}\n"
    )
# -*- coding: utf-8 -*- from .exec_env_config import ExecEnvConfig class DockerConfig(ExecEnvConfig): def __init__(self, parsed_yaml: {}, key: str): super().__init__(parsed_yaml, key) configs = parsed_yaml.get(key, {}) self.container_name = configs.get('container_name', '')
#!/usr/bin/env python3 import sys import os from systemrdl import RDLCompiler, RDLCompileError from ralbot.html import HTMLExporter # Collect SystemRDL input files from the command line arguments input_files = sys.argv[1:] # Create an instance of the compiler rdlc = RDLCompiler() try: # Compile all the files provided for input_file in input_files: rdlc.compile_file(input_file) # Elaborate the design root = rdlc.elaborate() except RDLCompileError: # A compilation error occurred. Exit with error code sys.exit(1) # Create an HTML exporter exporter = HTMLExporter() # Create HTML documentation exporter.export(root, "html")
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import json import unittest from api_list_data_source import APIListDataSource from server_instance import ServerInstance from test_file_system import TestFileSystem def _ToTestData(obj): '''Transforms |obj| into test data by turning a list of files into an object mapping that file to its contents (derived from its name). ''' return dict((name, name) for name in obj) def _ToTestFeatures(names): '''Transforms a list of strings into a minimal JSON features object. ''' return dict((name, {'name': name, 'platforms': platforms}) for name, platforms in names) _TEST_API_FEATURES = _ToTestFeatures([ ('alarms', ['apps', 'extensions']), ('app.window', ['apps']), ('browserAction', ['extensions']), ('experimental.bluetooth', ['apps']), ('experimental.history', ['extensions']), ('experimental.power', ['apps', 'extensions']), ('infobars', ['extensions']), ('something_internal', ['apps']), ('something_else_internal', ['extensions']), ('storage', ['apps', 'extensions']) ]) _TEST_DATA = { 'api': { '_api_features.json': json.dumps(_TEST_API_FEATURES), '_manifest_features.json': '{}', '_permission_features.json': '{}', }, 'docs': { 'templates': { 'json': { 'manifest.json': '{}', 'permissions.json': '{}', }, 'public': { 'apps': _ToTestData([ 'alarms.html', 'app_window.html', 'experimental_bluetooth.html', 'experimental_power.html', 'storage.html', ]), 'extensions': _ToTestData([ 'alarms.html', 'browserAction.html', 'experimental_history.html', 'experimental_power.html', 'infobars.html', 'storage.html', ]), }, }, }, } class APIListDataSourceTest(unittest.TestCase): def setUp(self): server_instance = ServerInstance.ForTest(TestFileSystem(_TEST_DATA)) self._factory = APIListDataSource.Factory( server_instance.compiled_fs_factory, server_instance.host_file_system_provider.GetTrunk(), 
server_instance.features_bundle, server_instance.object_store_creator) def testApps(self): api_list = self._factory.Create() self.assertEqual([ { 'name': 'alarms', 'platforms': ['apps', 'extensions'] }, { 'name': 'app.window', 'platforms': ['apps'] }, { 'name': 'storage', 'platforms': ['apps', 'extensions'], 'last': True }], api_list.get('apps').get('chrome')) def testExperimentalApps(self): api_list = self._factory.Create() self.assertEqual([ { 'name': 'experimental.bluetooth', 'platforms': ['apps'] }, { 'name': 'experimental.power', 'platforms': ['apps', 'extensions'], 'last': True }], sorted(api_list.get('apps').get('experimental'))) def testExtensions(self): api_list = self._factory.Create() self.assertEqual([ { 'name': 'alarms', 'platforms': ['apps', 'extensions'] }, { 'name': 'browserAction', 'platforms': ['extensions'] }, { 'name': 'infobars', 'platforms': ['extensions'] }, { 'name': 'storage', 'platforms': ['apps', 'extensions'], 'last': True }], sorted(api_list.get('extensions').get('chrome'))) def testExperimentalExtensions(self): api_list = self._factory.Create() self.assertEqual([ { 'name': 'experimental.history', 'platforms': ['extensions'] }, { 'name': 'experimental.power', 'platforms': ['apps', 'extensions'], 'last': True }], sorted(api_list.get('extensions').get('experimental'))) if __name__ == '__main__': unittest.main()
""" Sequential generator model - sample the outputs vector iteratively entry by entry. For an output vector of size n, n models are trained. First model learns the distribution of first entry in output vector. Second model learns the distribution of second entry, conditioned on first one. Thirs model learns the distribution of third entry, conditioned on first and second ones. ... """ import numpy as np from sklearn.base import clone, BaseEstimator from .base import GeneratorBase class ScalarGenerator(GeneratorBase): """ A model that can generate a single scalar value. Trains a regression model to simulate a conditional density function. See `fit` for further details on how the model is trained. Parameters ---------- estimator: BaseEstimator, regression model that will be used to simulate density function. """ def __init__(self, estimator): if not isinstance(estimator, BaseEstimator): raise ValueError('estimator should be of type BaseEstimator') self.estimator = estimator self.estimator_ = None self.m = None self.M = None def set_params(self, **params): """Delegate parameters to estimator""" self.estimator.set_params(**params) return self def fit(self, X, y, **kwargs): """Fit generative model to the data. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to condition the generative model's outputs. y : {array-like, sparse matrix}, shape [n_samples] The data that should be generated by particular model. 
""" # compute range of values of output m, M = np.min(y), np.max(y) self.m = m self.M = M interval = M - m # expand intervals a bit m -= interval * 0.1 M += interval * 0.1 # make a clone of estimator self.estimator_ = clone(self.estimator) # create a training dataset for estimator X_, y_ = [], [] N = len(y) # high density values (1.0) for correctly generated values X_.append(np.column_stack([X, y])) y_.append(np.ones(N)) # low density values (0.0) for incorrectly generated values for reps in range(5): random_y = np.random.rand(N)*(M - m) + m X_.append(np.column_stack([X, random_y])) y_.append(np.zeros(N)) # overlap of correctly and incorrectly generated values is # ok - then the output of model is ~ mean of vales # make matricies X_ = np.row_stack(X_) y_ = np.concatenate(y_) # fit the density surrogate self.estimator_.fit(X_, y_) def predict(self, X, **kwargs): """Generate a scalar value conditioned on input. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to condition the generative model's outputs. Output: ------ y : {array-like, sparse matrix}, shape [n_samples] The scalar outputs generated by the model. """ # range of all possible outputs yv = np.linspace(self.m, self.M, 10000) # matrix that will be used as output C = np.column_stack([X, np.ones(len(X))]) Yp = [] # get distributions for outputs for v in yv: C[:, -1] = v yp = self.estimator_.predict(C) Yp.append(yp) # distribution for a single output is a row Yp = np.column_stack(Yp) # normalize distribution values Yp = np.maximum(Yp, 0.0) Yp = (Yp.T / np.sum(Yp, axis=-1)).T # do interpolation here? # generate random values y = [ np.random.choice(yv, p=p) for p in Yp ] y = np.array(y) return y class SGM(GeneratorBase): """ This class generates desired output feature by feature. That is, for example with outputs Y \in R^(n, k), k scalar conditional generative models are build for every entry in output vector. 
Inspired by http://proceedings.mlr.press/v15/larochelle11a/larochelle11a.pdf https://arxiv.org/pdf/1606.05328.pdf """ def __init__(self, estimator): self.estimator = estimator self.models = None # models used for generation of output features, as well as scales for features def set_params(self, **params): """Delegate parameters to estimator""" self.estimator.set_params(**params) return self def fit(self, X, Y, **kwargs): """Fit generative models to the data Parameters ---------- Y : {array-like, sparse matrix}, shape [n_samples, n_output_features] The data that should be generated by particular model. X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to condition the generative model's outputs. """ # all inputs to the model self.models = [] for y in Y.T: # fit the model model = clone(self.estimator) model.fit(X, y) # save model and its range self.models.append(model) # generated output is not a condition X = np.column_stack([X, y]) return self def predict(self, X, **kwargs): """Generate samples using the generative model Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to condition the generative model's outputs. """ if not self.models: raise AssertionError('Model is not fitted yet. Please call fit first') # initial condition list Y = [] for model in self.models: y = model.predict(X) Y.append(y) X = np.column_stack([X, y]) # stack all outputs Y = np.column_stack(Y) return Y
from django.db import models
import users.models
from subscriptions.models import Subscription
from django.utils.translation import gettext as _


class Show(models.Model):
    """A movie or TV show that users can subscribe to."""

    # Integer values stored in the ``category`` column.
    TYPE_CHOICE_UNKNOWN = 0
    TYPE_CHOICE_MOVIE = 1
    TYPE_CHOICE_TV = 2
    TYPE_CHOICES = (
        (TYPE_CHOICE_UNKNOWN, _('Unknown')),
        (TYPE_CHOICE_MOVIE, _('Movie')),
        (TYPE_CHOICE_TV, _('TV')),
    )

    name = models.CharField(max_length=255, unique=True)
    # Users following this show, linked through the Subscription model.
    subscribers = models.ManyToManyField('users.User', through=Subscription)
    category = models.IntegerField(
        choices = TYPE_CHOICES,
        default = TYPE_CHOICE_UNKNOWN,
    )
    # Whether the show is currently tracked/visible.
    is_active = models.BooleanField(default=False)

    def __str__(self):
        return self.name


class Episode(models.Model):
    """A single episode of a Show; unique per (name, season, show)."""

    name = models.CharField(max_length=255)
    # Season number; null/blank allowed (e.g. for shows without seasons).
    season = models.PositiveIntegerField(null=True, blank=True)
    show = models.ForeignKey(Show, on_delete=models.CASCADE)

    class Meta:
        # NOTE(review): newer Django recommends UniqueConstraint in
        # Meta.constraints over unique_together.
        unique_together = (
            ("name", "season", "show"),
        )

    def __str__(self):
        return self.name


class Character(models.Model):
    """A character appearing in a Show; unique per (first_name, last_name, show)."""

    first_name = models.CharField(max_length=255, blank=True)
    last_name = models.CharField(max_length=255, blank=True)
    show = models.ForeignKey(Show, on_delete=models.CASCADE)

    class Meta:
        unique_together = (
            ("first_name", "last_name", "show"),
        )

    def __str__(self):
        return f"{self.first_name} {self.last_name}"
from yml2sif import dict_to_sif
import sys
import yaml


def test_dict_to_sif():
    """Smoke-test dict_to_sif: parse esim1.yml and write SIF output to stdout."""
    # "with" guarantees the file handle is closed even if parsing raises
    # (the original leaked it on error).  safe_load refuses arbitrary
    # Python-object YAML tags, and an explicit loader is mandatory since
    # PyYAML 6, where yaml.load() without a Loader raises a TypeError.
    # (If esim1.yml relies on non-safe tags, switch to yaml.full_load.)
    with open('./esim1.yml', 'r') as ymlfile:
        ymldata = yaml.safe_load(ymlfile)
    # Original bound sys.stdout via the non-public "from os import sys";
    # import sys directly instead.
    dict_to_sif(ymldata, sys.stdout)


if __name__ == "__main__":
    test_dict_to_sif()
def remove_outer_symbols(s):
    """Remove the first '[' and the last ']' after it from *s*, keeping the rest.

    Example: "a[bc]d" -> "abcd".  Raises ValueError when '[' or ']' is absent.
    """
    left = s.index("[")
    right = s.rindex("]", left)
    return s[:left] + s[left+1:right] + s[right+1:]


async def embed_cmd(bot, discord, message, botconfig, platform, os, datetime, one_result, localization, unix_time_millis, embed_color, connection, cursor, prefix):
    """Build and send a custom embed from a chat command.

    Expected message shape (inferred from the parsing below — TODO confirm):
    ``<prefix>embed -[][<title>],[<footer>] ... -t <description>``.
    On malformed input an error embed taken from ``localization`` is sent.
    """
    # Whitespace-split tokens; args[0] is the command word itself.
    args = message.content.split(" ");
    # Split on the "-[]" marker to isolate the bracketed option list.
    args2 = message.content.split("-[]");
    parameter_option = ""
    # Everything after the command word.
    args_str = " ".join(args[1:])
    try:
        # Position of the last "-t" flag; the text before it is the embed body.
        question_rindex = args_str.rindex('-t', 0)
        text = args_str[:question_rindex]
        options_str = ""
        options = []
        # NOTE(review): endtimeerr/endtime are never used below — dead state?
        endtimeerr = ""
        endtime = 0
        # Record whether a "-t" flag was present anywhere in the message.
        for args_index in args:
            if args_index == "-t":
                parameter_option += '-t'
        for args_index in args2:
            try:
                # Offsets look hand-tuned to the length of the built-in vs.
                # custom prefix — fragile; TODO confirm against real messages.
                if message.content.startswith(botconfig['prefix']):
                    index = args_str.index('[') + 7
                    rindex = args_str.rindex(']') + 8
                elif message.content.startswith(prefix):
                    index = args_str.index('[') + 6 + len(prefix)
                    rindex = args_str.rindex(']') + 7 + len(prefix)
                options_str += remove_outer_symbols(args_index[index:rindex])
                options = options_str.split("],[")
            except:
                # NOTE(review): bare except silently swallows everything,
                # including NameError when neither prefix branch ran —
                # consider narrowing to (ValueError, IndexError, KeyError).
                pass
        embed_title = ""
        embed_footer = ""
        # First parsed option becomes the title, second the footer.
        for opt in options:
            if options.index(opt) == 0:
                embed_title = opt
                print('Yes! ' + embed_title+ "\n" + str(options))
            if options.index(opt) == 1:
                embed_footer += opt
        # Validation: require a title/description argument, the -t flag, and
        # at least one parsed option.  IndexError from args[1]/args[2] falls
        # through to the outer handler.
        if args[1] == "" or args[1] == None or args[2] == "" or parameter_option != '-t' or options == [] or args[2] == None:
            no_args = discord.Embed(title=localization[1][20][0], description=str(localization[1][20][1]).format(prefix), color=embed_color)
            return await message.channel.send(embed=no_args)
        embed = discord.Embed(title=embed_title, description=text, color=embed_color)
        embed.set_footer(text=embed_footer)
        msg = await message.channel.send(embed=embed)
    except Exception as e:
        # Any unexpected failure: log it and reply with a generic usage error.
        print(e)
        no_args = discord.Embed(title=localization[1][16][0], description=str(localization[1][16][4]).format(prefix), color=embed_color)
        await message.channel.send(embed=no_args)
#!/usr/bin/env python3 """ Import packages """ import numpy as np import argparse import os import torch import torch.nn as nn import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torch.utils.data import DataLoader from net import ResBase, ResClassifier, RelativeRotationClassifier from data_loader import DatasetGeneratorMultimodal, MyTransformer from utils import OptimizerManager, EvaluationManager, IteratorWrapper, weights_init, default_paths, map_to_device, \ add_base_args, entropy_loss from tqdm import tqdm # Parse arguments parser = argparse.ArgumentParser() add_base_args(parser) parser.add_argument('--test_batches', default=100, type=int) args = parser.parse_args() # Load default paths if needed default_paths(args) # Run name hp_list = [args.task, args.net, args.epoch, args.lr, args.lr_mult, args.batch_size, args.weight_rot, args.weight_ent] hp_list = [str(hp) for hp in hp_list] hp_string = '_'.join(hp_list) + args.suffix print("Run: " + hp_string) # Tensorboard summary writer = SummaryWriter(log_dir=os.path.join(args.tensorboard, hp_string), flush_secs=5) # Device device = torch.device('cuda:{}'.format(args.gpu)) # Center crop, no random flip test_transform = MyTransformer([int((256 - 224) / 2), int((256 - 224) / 2)], False) """ Prepare datasets """ # Source: training set train_set_source = DatasetGeneratorMultimodal(args.data_root_source, args.train_file_source, do_rot=False) # Source: test set test_set_source = DatasetGeneratorMultimodal(args.data_root_source, args.test_file_source, do_rot=False, transform=test_transform) # Target: training set (for entropy) train_set_target = DatasetGeneratorMultimodal(args.data_root_target, args.train_file_target, ds_name=args.target, do_rot=False) # Source: training set (for relative rotation) rot_set_source = DatasetGeneratorMultimodal(args.data_root_source, args.train_file_source, do_rot=True) # Source: test set (for relative rotation) rot_test_set_source = 
DatasetGeneratorMultimodal(args.data_root_source, args.test_file_source, do_rot=True) # Target: training and test set (for relative rotation) rot_set_target = DatasetGeneratorMultimodal(args.data_root_target, args.train_file_target, ds_name=args.target, do_rot=True) """ Prepare data loaders """ # Source training recognition train_loader_source = DataLoader(train_set_source, shuffle=True, batch_size=args.batch_size, num_workers=args.num_workers) # Source test recognition test_loader_source = DataLoader(test_set_source, shuffle=True, batch_size=args.batch_size, num_workers=args.num_workers) # Target train train_loader_target = DataLoader(train_set_target, shuffle=True, batch_size=args.batch_size, num_workers=args.num_workers) # Target test test_loader_target = DataLoader(train_set_target, shuffle=True, batch_size=args.batch_size, num_workers=args.num_workers) # Source rot rot_source_loader = DataLoader(rot_set_source, shuffle=True, batch_size=args.batch_size, num_workers=args.num_workers) rot_test_source_loader = DataLoader(rot_test_set_source, shuffle=True, batch_size=args.batch_size, num_workers=args.num_workers) # Target rot rot_target_loader = DataLoader(rot_set_target, shuffle=True, batch_size=args.batch_size, num_workers=args.num_workers) rot_test_target_loader = DataLoader(rot_set_target, shuffle=True, batch_size=args.batch_size, num_workers=args.num_workers) """ Set up network & optimizer """ input_dim_F = 2048 if args.net == 'resnet50' else 512 netG_rgb = ResBase(architecture=args.net) netG_depth = ResBase(architecture=args.net) netF = ResClassifier(input_dim=input_dim_F * 2, class_num=args.class_num, dropout_p=args.dropout_p, extract=False) netF_rot = RelativeRotationClassifier(input_dim=input_dim_F * 2, class_num=4) netF_rot.apply(weights_init) netF.apply(weights_init) net_list = [netG_rgb, netG_depth, netF, netF_rot] net_list = map_to_device(device, net_list) ce_loss = nn.CrossEntropyLoss() opt_g_rgb = optim.SGD(netG_rgb.parameters(), lr=args.lr, 
momentum=0.9, weight_decay=args.weight_decay) opt_g_depth = optim.SGD(netG_depth.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.weight_decay) opt_f = optim.SGD(netF.parameters(), lr=args.lr * args.lr_mult, momentum=0.9, weight_decay=args.weight_decay) opt_f_rot = optim.SGD(netF_rot.parameters(), lr=args.lr * args.lr_mult, momentum=0.9, weight_decay=args.weight_decay) optims_list = [opt_g_rgb, opt_g_depth, opt_f, opt_f_rot] for epoch in range(1, args.epoch + 1): print("Epoch {} / {}".format(epoch, args.epoch)) # ========================= TRAINING ========================= # Train source (recognition) train_loader_source_rec_iter = train_loader_source # Train target (entropy) train_target_loader_iter = IteratorWrapper(train_loader_target) # Test target test_target_loader_iter = IteratorWrapper(test_loader_target) # Source (rotation) rot_source_loader_iter = IteratorWrapper(rot_source_loader) # Target (rotation) rot_target_loader_iter = IteratorWrapper(rot_target_loader) with tqdm(total=len(train_loader_source), desc="Train") as pb: for batch_num, (img_rgb, img_depth, img_label_source) in enumerate(train_loader_source_rec_iter): if img_rgb.size(0) != args.batch_size: break # The optimization step is performed by OptimizerManager with OptimizerManager(optims_list): # Compute source features img_rgb, img_depth, img_label_source = map_to_device(device, (img_rgb, img_depth, img_label_source)) feat_rgb, _ = netG_rgb(img_rgb) feat_depth, _ = netG_depth(img_depth) features_source = torch.cat((feat_rgb, feat_depth), 1) logits = netF(features_source) # Classification los loss_rec = ce_loss(logits, img_label_source) # Entropy loss if args.weight_ent > 0.: # Compute target features img_rgb, img_depth, _ = train_target_loader_iter.get_next() img_rgb, img_depth = map_to_device(device, (img_rgb, img_depth)) feat_rgb, _ = netG_rgb(img_rgb) feat_depth, _ = netG_depth(img_depth) features_target = torch.cat((feat_rgb, feat_depth), 1) logits = netF(features_target) loss_ent = 
entropy_loss(logits) else: loss_ent = 0 # Backpropagate loss = loss_rec + args.weight_ent * loss_ent loss.backward() del img_rgb, img_depth, img_label_source, feat_rgb, feat_depth, logits # Relative Rotation if args.weight_rot > 0.0: # Load batch: rotation, source img_rgb, img_depth, _, rot_label = rot_source_loader_iter.get_next() img_rgb, img_depth, rot_label = map_to_device(device, (img_rgb, img_depth, rot_label)) # Compute features (without pooling!) _, pooled_rgb = netG_rgb(img_rgb) _, pooled_depth = netG_depth(img_depth) # Prediction logits_rot = netF_rot(torch.cat((pooled_rgb, pooled_depth), 1)) # Classification loss for the rleative rotation task loss_rot = ce_loss(logits_rot, rot_label) loss = args.weight_rot * loss_rot # Backpropagate loss.backward() loss_rot = loss_rot.item() del img_rgb, img_depth, rot_label, pooled_rgb, pooled_depth, logits_rot, loss # Load batch: rotation, target img_rgb, img_depth, _, rot_label = rot_target_loader_iter.get_next() img_rgb, img_depth, rot_label = map_to_device(device, (img_rgb, img_depth, rot_label)) # Compute features (without pooling!) 
_, pooled_rgb = netG_rgb(img_rgb) _, pooled_depth = netG_depth(img_depth) # Prediction logits_rot = netF_rot(torch.cat((pooled_rgb, pooled_depth), 1)) # Classification loss for the rleative rotation task loss = args.weight_rot * ce_loss(logits_rot, rot_label) # Backpropagate loss.backward() del img_rgb, img_depth, rot_label, pooled_rgb, pooled_depth, logits_rot, loss pb.update(1) # ========================= VALIDATION ========================= # Recognition - source actual_test_batches = min(len(test_loader_source), args.test_batches) with EvaluationManager(net_list), tqdm(total=actual_test_batches, desc="TestRecS") as pb: test_source_loader_iter = iter(test_loader_source) correct = 0.0 num_predictions = 0.0 val_loss = 0.0 for num_batch, (img_rgb, img_depth, img_label_source) in enumerate(test_source_loader_iter): # By default validate only on 100 batches if num_batch >= args.test_batches: break # Compute source features img_rgb, img_depth, img_label_source = map_to_device(device, (img_rgb, img_depth, img_label_source)) feat_rgb, _ = netG_rgb(img_rgb) feat_depth, _ = netG_depth(img_depth) features_source = torch.cat((feat_rgb, feat_depth), 1) # Compute predictions preds = netF(features_source) val_loss += ce_loss(preds, img_label_source).item() correct += (torch.argmax(preds, dim=1) == img_label_source).sum().item() num_predictions += preds.shape[0] pb.update(1) val_acc = correct / num_predictions val_loss = val_loss / args.test_batches print("Epoch: {} - Validation source accuracy (recognition): {}".format(epoch, val_acc)) del img_rgb, img_depth, img_label_source, feat_rgb, feat_depth, preds writer.add_scalar("Loss/train", loss_rec.item(), epoch) writer.add_scalar("Loss/val", val_loss, epoch) writer.add_scalar("Accuracy/val", val_acc, epoch) # Relative Rotation if args.weight_rot > 0.0: # Rotation - source cf_matrix = np.zeros([4, 4]) actual_test_batches = min(len(rot_test_source_loader), args.test_batches) with EvaluationManager(net_list), 
tqdm(total=actual_test_batches, desc="TestRotS") as pb: rot_test_source_loader_iter = iter(rot_test_source_loader) correct = 0.0 num_predictions = 0.0 for num_val_batch, (img_rgb, img_depth, _, rot_label) in enumerate(rot_test_source_loader_iter): if num_val_batch > args.test_batches: break img_rgb, img_depth, rot_label = map_to_device(device, (img_rgb, img_depth, rot_label)) # Compute features (without pooling) _, pooled_rgb = netG_rgb(img_rgb) _, pooled_depth = netG_depth(img_depth) # Compute predictions preds = netF_rot(torch.cat((pooled_rgb, pooled_depth), 1)) val_loss_rot = ce_loss(preds, rot_label).item() correct += (torch.argmax(preds, dim=1) == rot_label).sum().item() num_predictions += preds.shape[0] pb.update(1) del img_rgb, img_depth, rot_label, preds rot_val_acc = correct / num_predictions print("Epoch: {} - Validation source rotation accuracy: {}".format(epoch, rot_val_acc)) # Rotation - target actual_test_batches = min(len(rot_test_target_loader), args.test_batches) with EvaluationManager(net_list), tqdm(total=actual_test_batches, desc="TestRotT") as pb: rot_test_target_loader_iter = iter(rot_test_target_loader) correct = 0.0 val_loss_rot = 0.0 num_predictions = 0.0 for num_val_batch, (img_rgb, img_depth, _, rot_label) in enumerate(rot_test_target_loader_iter): if num_val_batch > args.test_batches: break img_rgb, img_depth, rot_label = map_to_device(device, (img_rgb, img_depth, rot_label)) # Compute features (without pooling) _, pooled_rgb = netG_rgb(img_rgb) _, pooled_depth = netG_depth(img_depth) # Compute predictions preds = netF_rot(torch.cat((pooled_rgb, pooled_depth), 1)) val_loss_rot += ce_loss(preds, rot_label).item() correct += (torch.argmax(preds, dim=1) == rot_label).sum().item() num_predictions += preds.shape[0] pb.update(1) rot_val_acc = correct / num_predictions val_loss_rot = val_loss_rot / args.test_batches print("Epoch: {} - Validation target rotation accuracy: {}".format(epoch, rot_val_acc)) del img_rgb, img_depth, rot_label, preds 
writer.add_scalar("Loss/rot", loss_rot, epoch) writer.add_scalar("Loss/rot_val", val_loss_rot, epoch) writer.add_scalar("Accuracy/rot_val", rot_val_acc, epoch) # Save models if epoch % 5 == 0: if not os.path.exists(args.snapshot): os.mkdir(args.snapshot) torch.save(netG_rgb.state_dict(), os.path.join(args.snapshot, hp_string + "_netG_rgb_epoch" + str(epoch) + ".pth")) torch.save(netG_depth.state_dict(), os.path.join(args.snapshot, hp_string + "_netG_depth_epoch" + str(epoch) + ".pth")) torch.save(netF.state_dict(), os.path.join(args.snapshot, hp_string + "_netF_rgbd_epoch" + str(epoch) + ".pth")) torch.save(netF_rot.state_dict(), os.path.join(args.snapshot, hp_string + "_netF_rot_rgbd_epoch" + str(epoch) + ".pth"))
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function import binascii import os import pytest from cryptography.exceptions import ( AlreadyFinalized, InvalidSignature, _Reasons ) from cryptography.hazmat.primitives.poly1305 import Poly1305 from ...utils import ( load_nist_vectors, load_vectors_from_file, raises_unsupported_algorithm ) @pytest.mark.supported( only_if=lambda backend: not backend.poly1305_supported(), skip_message="Requires OpenSSL without poly1305 support" ) def test_poly1305_unsupported(backend): with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_MAC): Poly1305(b"0" * 32) @pytest.mark.supported( only_if=lambda backend: backend.poly1305_supported(), skip_message="Requires OpenSSL with poly1305 support" ) class TestPoly1305(object): @pytest.mark.parametrize( "vector", load_vectors_from_file( os.path.join("poly1305", "rfc7539.txt"), load_nist_vectors ) ) def test_vectors(self, vector, backend): key = binascii.unhexlify(vector["key"]) msg = binascii.unhexlify(vector["msg"]) tag = binascii.unhexlify(vector["tag"]) poly = Poly1305(key) poly.update(msg) assert poly.finalize() == tag def test_key_with_no_additional_references(self, backend): poly = Poly1305(os.urandom(32)) assert len(poly.finalize()) == 16 def test_raises_after_finalize(self, backend): poly = Poly1305(b"0" * 32) poly.finalize() with pytest.raises(AlreadyFinalized): poly.update(b"foo") with pytest.raises(AlreadyFinalized): poly.finalize() def test_reject_unicode(self, backend): poly = Poly1305(b"0" * 32) with pytest.raises(TypeError): poly.update(u'') def test_verify(self, backend): poly = Poly1305(b"0" * 32) poly.update(b"msg") tag = poly.finalize() with pytest.raises(AlreadyFinalized): poly.verify(b"") poly2 = Poly1305(b"0" * 32) poly2.update(b"msg") poly2.verify(tag) def test_invalid_verify(self, 
backend): poly = Poly1305(b"0" * 32) poly.update(b"msg") with pytest.raises(InvalidSignature): poly.verify(b"") p2 = Poly1305(b"0" * 32) p2.update(b"msg") with pytest.raises(InvalidSignature): p2.verify(b"\x00" * 16) def test_verify_reject_unicode(self, backend): poly = Poly1305(b"0" * 32) with pytest.raises(TypeError): poly.verify(u'') def test_invalid_key_type(self, backend): with pytest.raises(TypeError): Poly1305(object()) def test_invalid_key_length(self, backend): with pytest.raises(ValueError): Poly1305(b"0" * 31) with pytest.raises(ValueError): Poly1305(b"0" * 33) def test_buffer_protocol(self, backend): key = binascii.unhexlify( b"1c9240a5eb55d38af333888604f6b5f0473917c1402b80099dca5cb" b"c207075c0" ) msg = binascii.unhexlify( b"2754776173206272696c6c69672c20616e642074686520736c69746" b"87920746f7665730a446964206779726520616e642067696d626c65" b"20696e2074686520776162653a0a416c6c206d696d7379207765726" b"52074686520626f726f676f7665732c0a416e6420746865206d6f6d" b"65207261746873206f757467726162652e" ) key = bytearray(key) poly = Poly1305(key) poly.update(bytearray(msg)) assert poly.finalize() == binascii.unhexlify( b"4541669a7eaaee61e708dc7cbcc5eb62" )
# pylint: disable=invalid-name """ https://leetcode.com/problems/longest-palindrome/ Given a string s which consists of lowercase or uppercase letters, return the length of the longest palindrome that can be built with those letters. Letters are case sensitive, for example, "Aa" is not considered a palindrome here. Example 1: Input: s = "abccccdd" Output: 7 Explanation: One longest palindrome that can be built is "dccaccd", whose length is 7. Example 2: Input: s = "a" Output: 1 Example 3: Input: s = "bb" Output: 2 Constraints: - 1 <= s.length <= 2000 - s consists of lowercase and/or uppercase English letters only. """ from typing import TYPE_CHECKING, List DEBUGGING = True try: # pylint: disable=unused-import import test_solution except ImportError: try: from typing import Counter except ImportError: from collections import Counter if not TYPE_CHECKING: Answer = int DEBUGGING = False else: from test_solution import Answer, Counter class Solution: "required by leetcode" # pylint: disable=too-few-public-methods,no-self-use def longestPalindrome(self, s: str) -> Answer: """ return the length of the longest palindrome constructible using only the characters in s """ # I am pretty sure collections.Counter will be indespensible here. # My observation is that a Counter for a palindrome would probably # contain at most one odd-count element: # "abcba" -> # { # "a": 2, # "b": 2, # "c": 1, # } # Another observation is that a palindrome of even length implies that # the string from which is was constructed contained at modt one # odd-count letter. # # In this vein, a palindrome can be constructed by separating all one 1-count letters. 
# Then, the odd-count letters are reduced by one, with the letter taken # off added to the 1-count letters (i.e.: # { # "a": 3, # "b": 5, # "c": 4, # "d": 2, # } # { # "e": 1, # "f": 1, # } # -> # { # "a": 2, # "b": 4, # "c": 4, # "d": 2, # } # { # "e": 1, # "f": 1, # "a": 1, # "b": 1, # } # Then, the counts of the even ones can be summed, and if # there are any 1-count letters, the sum can be incremented. if not TYPE_CHECKING and Answer is int: counts = Counter(s) has_one_counts = False for letter, count in counts.items(): if count % 2 == 1: # is odd counts[letter] -= 1 has_one_counts = True if has_one_counts: return counts.total() + 1 return counts.total() # count the number of times an element occurs in the string counts = Counter(sorted(s)) # initialize a place to keep track of elements which can't be used to # make a palindrome through mirroring singles: List[str] = [] for letter, count in counts.items(): if count % 2 == 1: # is odd counts[letter] -= 1 singles.append(letter) # ensures an int is returned, since all odd counts will first be made even counts[letter] //= 2 assert len(set(singles)) == len(singles), f"singles not unique:\n{s}\n{singles}" elements = list(counts.elements()) elements.sort() palindrome = elements[:] if singles: singles.sort() palindrome.append(singles[-1]) # all but the last element, reversed palindrome.extend(palindrome[-2::-1]) else: palindrome.extend(reversed(palindrome)) return "".join(palindrome)
""" Utilities for generating input and output strings. """ def seq_to_str(seq, sep="\n", with_len=True, len_sep="\n", end_sep="\n"): """ Convert a Python sequence into a string with the given separator. Each item is converted to a string using str(). If with_len is given, put the length first, and separate it from the rest with len_sep. Finish with end_sep. """ result = "" if with_len: result += "%d%s" % (len(seq), len_sep) result += sep.join(str(i) for i in seq) + end_sep return result def seqs_to_str(seq, sep="\n", with_len=True, len_sep="\n", end_sep="\n", inner_sep=" ", inner_with_len=False, inner_len_sep=" ", inner_end_sep=""): """ Convert a sequence of sequences to a string with the given separator. For each inner sequence, seq_to_str is called with the "inner" arguments. """ result = "" if with_len: result += "%d%s" % (len(seq), len_sep) result += sep.join(seq_to_str(s, sep=inner_sep, with_len=inner_with_len, len_sep=inner_len_sep, end_sep=inner_end_sep) for s in seq) + end_sep return result
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. import oci # noqa: F401 from oci.util import WAIT_RESOURCE_NOT_FOUND # noqa: F401 class ComputeClientCompositeOperations(object): """ This class provides a wrapper around :py:class:`~oci.core.ComputeClient` and offers convenience methods for operations that would otherwise need to be chained together. For example, instead of performing an action on a resource (e.g. launching an instance, creating a load balancer) and then using a waiter to wait for the resource to enter a given state, you can call a single method in this class to accomplish the same functionality """ def __init__(self, client, work_request_client=None, **kwargs): """ Creates a new ComputeClientCompositeOperations object :param ComputeClient client: The service client which will be wrapped by this object :param oci.work_requests.WorkRequestClient work_request_client: (optional) The work request service client which will be used to wait for work request states. Default is None. """ self.client = client self._work_request_client = work_request_client if work_request_client else oci.work_requests.WorkRequestClient(self.client._config, **self.client._kwargs) def attach_boot_volume_and_wait_for_state(self, attach_boot_volume_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.attach_boot_volume` and waits for the :py:class:`~oci.core.models.BootVolumeAttachment` acted upon to enter the given state(s). :param oci.core.models.AttachBootVolumeDetails attach_boot_volume_details: (required) Attach boot volume request :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.BootVolumeAttachment.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.attach_boot_volume` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.attach_boot_volume(attach_boot_volume_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_boot_volume_attachment(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def attach_vnic_and_wait_for_state(self, attach_vnic_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.attach_vnic` and waits for the :py:class:`~oci.core.models.VnicAttachment` acted upon to enter the given state(s). :param oci.core.models.AttachVnicDetails attach_vnic_details: (required) Attach VNIC details. :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.VnicAttachment.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.attach_vnic` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.attach_vnic(attach_vnic_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_vnic_attachment(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def attach_volume_and_wait_for_state(self, attach_volume_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.attach_volume` and waits for the :py:class:`~oci.core.models.VolumeAttachment` acted upon to enter the given state(s). :param oci.core.models.AttachVolumeDetails attach_volume_details: (required) Attach volume request :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeAttachment.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.attach_volume` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. 
For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.attach_volume(attach_volume_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_volume_attachment(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def capture_console_history_and_wait_for_state(self, capture_console_history_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.capture_console_history` and waits for the :py:class:`~oci.core.models.ConsoleHistory` acted upon to enter the given state(s). :param oci.core.models.CaptureConsoleHistoryDetails capture_console_history_details: (required) Console history details :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.ConsoleHistory.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.capture_console_history` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. 
For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.capture_console_history(capture_console_history_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_console_history(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def change_compute_capacity_reservation_compartment_and_wait_for_work_request(self, capacity_reservation_id, change_compute_capacity_reservation_compartment_details, work_request_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.change_compute_capacity_reservation_compartment` and waits for the oci.work_requests.models.WorkRequest to enter the given state(s). :param str capacity_reservation_id: (required) The OCID of the compute capacity reservation. :param oci.core.models.ChangeComputeCapacityReservationCompartmentDetails change_compute_capacity_reservation_compartment_details: (required) The configuration details for the move operation. :param list[str] work_request_states: (optional) An array of work requests states to wait on. 
These should be valid values for :py:attr:`~oci.work_requests.models.WorkRequest.status` Default values are termination states: [STATUS_SUCCEEDED, STATUS_FAILED, STATUS_CANCELED] :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.change_compute_capacity_reservation_compartment` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.change_compute_capacity_reservation_compartment(capacity_reservation_id, change_compute_capacity_reservation_compartment_details, **operation_kwargs) work_request_states = work_request_states if work_request_states else oci.waiter._WORK_REQUEST_TERMINATION_STATES lowered_work_request_states = [w.lower() for w in work_request_states] work_request_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self._work_request_client, self._work_request_client.get_work_request(work_request_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_work_request_states, **waiter_kwargs ) return waiter_result except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def change_dedicated_vm_host_compartment_and_wait_for_work_request(self, dedicated_vm_host_id, change_dedicated_vm_host_compartment_details, work_request_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.change_dedicated_vm_host_compartment` and waits for the oci.work_requests.models.WorkRequest to enter the given state(s). :param str dedicated_vm_host_id: (required) The OCID of the dedicated VM host. 
:param oci.core.models.ChangeDedicatedVmHostCompartmentDetails change_dedicated_vm_host_compartment_details: (required) The request to move the dedicated virtual machine host to a different compartment. :param list[str] work_request_states: (optional) An array of work requests states to wait on. These should be valid values for :py:attr:`~oci.work_requests.models.WorkRequest.status` Default values are termination states: [STATUS_SUCCEEDED, STATUS_FAILED, STATUS_CANCELED] :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.change_dedicated_vm_host_compartment` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.change_dedicated_vm_host_compartment(dedicated_vm_host_id, change_dedicated_vm_host_compartment_details, **operation_kwargs) work_request_states = work_request_states if work_request_states else oci.waiter._WORK_REQUEST_TERMINATION_STATES lowered_work_request_states = [w.lower() for w in work_request_states] work_request_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self._work_request_client, self._work_request_client.get_work_request(work_request_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_work_request_states, **waiter_kwargs ) return waiter_result except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def change_instance_compartment_and_wait_for_work_request(self, instance_id, change_instance_compartment_details, work_request_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls 
:py:func:`~oci.core.ComputeClient.change_instance_compartment` and waits for the oci.work_requests.models.WorkRequest to enter the given state(s). :param str instance_id: (required) The `OCID`__ of the instance. __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm :param oci.core.models.ChangeInstanceCompartmentDetails change_instance_compartment_details: (required) Request to change the compartment of a given instance. :param list[str] work_request_states: (optional) An array of work requests states to wait on. These should be valid values for :py:attr:`~oci.work_requests.models.WorkRequest.status` Default values are termination states: [STATUS_SUCCEEDED, STATUS_FAILED, STATUS_CANCELED] :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.change_instance_compartment` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.change_instance_compartment(instance_id, change_instance_compartment_details, **operation_kwargs) work_request_states = work_request_states if work_request_states else oci.waiter._WORK_REQUEST_TERMINATION_STATES lowered_work_request_states = [w.lower() for w in work_request_states] work_request_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self._work_request_client, self._work_request_client.get_work_request(work_request_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_work_request_states, **waiter_kwargs ) return waiter_result except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def 
create_compute_capacity_reservation_and_wait_for_work_request(self, create_compute_capacity_reservation_details, work_request_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.create_compute_capacity_reservation` and waits for the oci.work_requests.models.WorkRequest to enter the given state(s). :param oci.core.models.CreateComputeCapacityReservationDetails create_compute_capacity_reservation_details: (required) Details for creating a new compute capacity reservation. **Caution:** Avoid using any confidential information when you use the API to supply string values. :param list[str] work_request_states: (optional) An array of work requests states to wait on. These should be valid values for :py:attr:`~oci.work_requests.models.WorkRequest.status` Default values are termination states: [STATUS_SUCCEEDED, STATUS_FAILED, STATUS_CANCELED] :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.create_compute_capacity_reservation` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. 
For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.create_compute_capacity_reservation(create_compute_capacity_reservation_details, **operation_kwargs) work_request_states = work_request_states if work_request_states else oci.waiter._WORK_REQUEST_TERMINATION_STATES lowered_work_request_states = [w.lower() for w in work_request_states] work_request_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self._work_request_client, self._work_request_client.get_work_request(work_request_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_work_request_states, **waiter_kwargs ) return waiter_result except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def create_compute_capacity_reservation_and_wait_for_state(self, create_compute_capacity_reservation_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.create_compute_capacity_reservation` and waits for the :py:class:`~oci.core.models.ComputeCapacityReservation` acted upon to enter the given state(s). :param oci.core.models.CreateComputeCapacityReservationDetails create_compute_capacity_reservation_details: (required) Details for creating a new compute capacity reservation. **Caution:** Avoid using any confidential information when you use the API to supply string values. :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.ComputeCapacityReservation.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.create_compute_capacity_reservation` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.create_compute_capacity_reservation(create_compute_capacity_reservation_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_compute_capacity_reservation(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def create_dedicated_vm_host_and_wait_for_work_request(self, create_dedicated_vm_host_details, work_request_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.create_dedicated_vm_host` and waits for the oci.work_requests.models.WorkRequest to enter the given state(s). :param oci.core.models.CreateDedicatedVmHostDetails create_dedicated_vm_host_details: (required) The details for creating a new dedicated virtual machine host. :param list[str] work_request_states: (optional) An array of work requests states to wait on. 
These should be valid values for :py:attr:`~oci.work_requests.models.WorkRequest.status` Default values are termination states: [STATUS_SUCCEEDED, STATUS_FAILED, STATUS_CANCELED] :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.create_dedicated_vm_host` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.create_dedicated_vm_host(create_dedicated_vm_host_details, **operation_kwargs) work_request_states = work_request_states if work_request_states else oci.waiter._WORK_REQUEST_TERMINATION_STATES lowered_work_request_states = [w.lower() for w in work_request_states] work_request_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self._work_request_client, self._work_request_client.get_work_request(work_request_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_work_request_states, **waiter_kwargs ) return waiter_result except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def create_dedicated_vm_host_and_wait_for_state(self, create_dedicated_vm_host_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.create_dedicated_vm_host` and waits for the :py:class:`~oci.core.models.DedicatedVmHost` acted upon to enter the given state(s). :param oci.core.models.CreateDedicatedVmHostDetails create_dedicated_vm_host_details: (required) The details for creating a new dedicated virtual machine host. :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.DedicatedVmHost.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.create_dedicated_vm_host` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.create_dedicated_vm_host(create_dedicated_vm_host_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_dedicated_vm_host(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def create_image_and_wait_for_work_request(self, create_image_details, work_request_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.create_image` and waits for the oci.work_requests.models.WorkRequest to enter the given state(s). :param oci.core.models.CreateImageDetails create_image_details: (required) Image creation details :param list[str] work_request_states: (optional) An array of work requests states to wait on. 
These should be valid values for :py:attr:`~oci.work_requests.models.WorkRequest.status` Default values are termination states: [STATUS_SUCCEEDED, STATUS_FAILED, STATUS_CANCELED] :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.create_image` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.create_image(create_image_details, **operation_kwargs) work_request_states = work_request_states if work_request_states else oci.waiter._WORK_REQUEST_TERMINATION_STATES lowered_work_request_states = [w.lower() for w in work_request_states] work_request_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self._work_request_client, self._work_request_client.get_work_request(work_request_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_work_request_states, **waiter_kwargs ) return waiter_result except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def create_image_and_wait_for_state(self, create_image_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.create_image` and waits for the :py:class:`~oci.core.models.Image` acted upon to enter the given state(s). :param oci.core.models.CreateImageDetails create_image_details: (required) Image creation details :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.Image.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.create_image` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.create_image(create_image_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_image(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def create_instance_console_connection_and_wait_for_state(self, create_instance_console_connection_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.create_instance_console_connection` and waits for the :py:class:`~oci.core.models.InstanceConsoleConnection` acted upon to enter the given state(s). :param oci.core.models.CreateInstanceConsoleConnectionDetails create_instance_console_connection_details: (required) Request object for creating an InstanceConsoleConnection :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.InstanceConsoleConnection.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.create_instance_console_connection` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.create_instance_console_connection(create_instance_console_connection_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_instance_console_connection(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def delete_compute_capacity_reservation_and_wait_for_work_request(self, capacity_reservation_id, work_request_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.delete_compute_capacity_reservation` and waits for the oci.work_requests.models.WorkRequest to enter the given state(s). :param str capacity_reservation_id: (required) The OCID of the compute capacity reservation. :param list[str] work_request_states: (optional) An array of work requests states to wait on. 
These should be valid values for :py:attr:`~oci.work_requests.models.WorkRequest.status` Default values are termination states: [STATUS_SUCCEEDED, STATUS_FAILED, STATUS_CANCELED] :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.delete_compute_capacity_reservation` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.delete_compute_capacity_reservation(capacity_reservation_id, **operation_kwargs) work_request_states = work_request_states if work_request_states else oci.waiter._WORK_REQUEST_TERMINATION_STATES lowered_work_request_states = [w.lower() for w in work_request_states] work_request_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self._work_request_client, self._work_request_client.get_work_request(work_request_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_work_request_states, **waiter_kwargs ) return waiter_result except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def delete_console_history_and_wait_for_state(self, instance_console_history_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.delete_console_history` and waits for the :py:class:`~oci.core.models.ConsoleHistory` acted upon to enter the given state(s). :param str instance_console_history_id: (required) The OCID of the console history. :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.ConsoleHistory.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.delete_console_history` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ initial_get_result = self.client.get_console_history(instance_console_history_id) operation_result = None try: operation_result = self.client.delete_console_history(instance_console_history_id, **operation_kwargs) except oci.exceptions.ServiceError as e: if e.status == 404: return WAIT_RESOURCE_NOT_FOUND else: raise e if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] try: waiter_result = oci.wait_until( self.client, initial_get_result, evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, succeed_on_not_found=True, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def delete_dedicated_vm_host_and_wait_for_work_request(self, dedicated_vm_host_id, work_request_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.delete_dedicated_vm_host` and waits for the oci.work_requests.models.WorkRequest to enter the given state(s). :param str dedicated_vm_host_id: (required) The OCID of the dedicated VM host. :param list[str] work_request_states: (optional) An array of work requests states to wait on. 
These should be valid values for :py:attr:`~oci.work_requests.models.WorkRequest.status` Default values are termination states: [STATUS_SUCCEEDED, STATUS_FAILED, STATUS_CANCELED] :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.delete_dedicated_vm_host` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.delete_dedicated_vm_host(dedicated_vm_host_id, **operation_kwargs) work_request_states = work_request_states if work_request_states else oci.waiter._WORK_REQUEST_TERMINATION_STATES lowered_work_request_states = [w.lower() for w in work_request_states] work_request_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self._work_request_client, self._work_request_client.get_work_request(work_request_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_work_request_states, **waiter_kwargs ) return waiter_result except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def delete_image_and_wait_for_state(self, image_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.delete_image` and waits for the :py:class:`~oci.core.models.Image` acted upon to enter the given state(s). :param str image_id: (required) The `OCID`__ of the image. __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.Image.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.delete_image` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ initial_get_result = self.client.get_image(image_id) operation_result = None try: operation_result = self.client.delete_image(image_id, **operation_kwargs) except oci.exceptions.ServiceError as e: if e.status == 404: return WAIT_RESOURCE_NOT_FOUND else: raise e if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] try: waiter_result = oci.wait_until( self.client, initial_get_result, evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, succeed_on_not_found=True, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def delete_instance_console_connection_and_wait_for_state(self, instance_console_connection_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.delete_instance_console_connection` and waits for the :py:class:`~oci.core.models.InstanceConsoleConnection` acted upon to enter the given state(s). :param str instance_console_connection_id: (required) The OCID of the instance console connection. :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.InstanceConsoleConnection.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.delete_instance_console_connection` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ initial_get_result = self.client.get_instance_console_connection(instance_console_connection_id) operation_result = None try: operation_result = self.client.delete_instance_console_connection(instance_console_connection_id, **operation_kwargs) except oci.exceptions.ServiceError as e: if e.status == 404: return WAIT_RESOURCE_NOT_FOUND else: raise e if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] try: waiter_result = oci.wait_until( self.client, initial_get_result, evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, succeed_on_not_found=True, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def detach_boot_volume_and_wait_for_state(self, boot_volume_attachment_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.detach_boot_volume` and waits for the :py:class:`~oci.core.models.BootVolumeAttachment` acted upon to enter the given state(s). :param str boot_volume_attachment_id: (required) The OCID of the boot volume attachment. :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.BootVolumeAttachment.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.detach_boot_volume` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ initial_get_result = self.client.get_boot_volume_attachment(boot_volume_attachment_id) operation_result = None try: operation_result = self.client.detach_boot_volume(boot_volume_attachment_id, **operation_kwargs) except oci.exceptions.ServiceError as e: if e.status == 404: return WAIT_RESOURCE_NOT_FOUND else: raise e if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] try: waiter_result = oci.wait_until( self.client, initial_get_result, evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, succeed_on_not_found=True, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def detach_vnic_and_wait_for_state(self, vnic_attachment_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.detach_vnic` and waits for the :py:class:`~oci.core.models.VnicAttachment` acted upon to enter the given state(s). :param str vnic_attachment_id: (required) The OCID of the VNIC attachment. :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.VnicAttachment.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.detach_vnic` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ initial_get_result = self.client.get_vnic_attachment(vnic_attachment_id) operation_result = None try: operation_result = self.client.detach_vnic(vnic_attachment_id, **operation_kwargs) except oci.exceptions.ServiceError as e: if e.status == 404: return WAIT_RESOURCE_NOT_FOUND else: raise e if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] try: waiter_result = oci.wait_until( self.client, initial_get_result, evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, succeed_on_not_found=True, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def detach_volume_and_wait_for_state(self, volume_attachment_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.detach_volume` and waits for the :py:class:`~oci.core.models.VolumeAttachment` acted upon to enter the given state(s). :param str volume_attachment_id: (required) The OCID of the volume attachment. :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.VolumeAttachment.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.detach_volume` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ initial_get_result = self.client.get_volume_attachment(volume_attachment_id) operation_result = None try: operation_result = self.client.detach_volume(volume_attachment_id, **operation_kwargs) except oci.exceptions.ServiceError as e: if e.status == 404: return WAIT_RESOURCE_NOT_FOUND else: raise e if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] try: waiter_result = oci.wait_until( self.client, initial_get_result, evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, succeed_on_not_found=True, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def export_image_and_wait_for_work_request(self, image_id, export_image_details, work_request_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.export_image` and waits for the oci.work_requests.models.WorkRequest to enter the given state(s). :param str image_id: (required) The `OCID`__ of the image. __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm :param oci.core.models.ExportImageDetails export_image_details: (required) Details for the image export. :param list[str] work_request_states: (optional) An array of work requests states to wait on. 
These should be valid values for :py:attr:`~oci.work_requests.models.WorkRequest.status` Default values are termination states: [STATUS_SUCCEEDED, STATUS_FAILED, STATUS_CANCELED] :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.export_image` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.export_image(image_id, export_image_details, **operation_kwargs) work_request_states = work_request_states if work_request_states else oci.waiter._WORK_REQUEST_TERMINATION_STATES lowered_work_request_states = [w.lower() for w in work_request_states] work_request_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self._work_request_client, self._work_request_client.get_work_request(work_request_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_work_request_states, **waiter_kwargs ) return waiter_result except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def export_image_and_wait_for_state(self, image_id, export_image_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.export_image` and waits for the :py:class:`~oci.core.models.Image` acted upon to enter the given state(s). :param str image_id: (required) The `OCID`__ of the image. __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm :param oci.core.models.ExportImageDetails export_image_details: (required) Details for the image export. :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.Image.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.export_image` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.export_image(image_id, export_image_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_image(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def instance_action_and_wait_for_state(self, instance_id, action, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.instance_action` and waits for the :py:class:`~oci.core.models.Instance` acted upon to enter the given state(s). :param str instance_id: (required) The `OCID`__ of the instance. __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm :param str action: (required) The action to perform on the instance. Allowed values are: "STOP", "START", "SOFTRESET", "RESET", "SOFTSTOP", "SENDDIAGNOSTICINTERRUPT" :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.Instance.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.instance_action` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.instance_action(instance_id, action, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_instance(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def launch_instance_and_wait_for_work_request(self, launch_instance_details, work_request_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.launch_instance` and waits for the oci.work_requests.models.WorkRequest to enter the given state(s). :param oci.core.models.LaunchInstanceDetails launch_instance_details: (required) Instance details :param list[str] work_request_states: (optional) An array of work requests states to wait on. 
These should be valid values for :py:attr:`~oci.work_requests.models.WorkRequest.status` Default values are termination states: [STATUS_SUCCEEDED, STATUS_FAILED, STATUS_CANCELED] :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.launch_instance` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.launch_instance(launch_instance_details, **operation_kwargs) work_request_states = work_request_states if work_request_states else oci.waiter._WORK_REQUEST_TERMINATION_STATES lowered_work_request_states = [w.lower() for w in work_request_states] work_request_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self._work_request_client, self._work_request_client.get_work_request(work_request_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_work_request_states, **waiter_kwargs ) return waiter_result except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def launch_instance_and_wait_for_state(self, launch_instance_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.launch_instance` and waits for the :py:class:`~oci.core.models.Instance` acted upon to enter the given state(s). :param oci.core.models.LaunchInstanceDetails launch_instance_details: (required) Instance details :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.Instance.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.launch_instance` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.launch_instance(launch_instance_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_instance(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def terminate_instance_and_wait_for_state(self, instance_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.terminate_instance` and waits for the :py:class:`~oci.core.models.Instance` acted upon to enter the given state(s). :param str instance_id: (required) The `OCID`__ of the instance. __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.core.models.Instance.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.terminate_instance` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ initial_get_result = self.client.get_instance(instance_id) operation_result = None try: operation_result = self.client.terminate_instance(instance_id, **operation_kwargs) except oci.exceptions.ServiceError as e: if e.status == 404: return WAIT_RESOURCE_NOT_FOUND else: raise e if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] try: waiter_result = oci.wait_until( self.client, initial_get_result, evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, succeed_on_not_found=True, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def update_compute_capacity_reservation_and_wait_for_work_request(self, capacity_reservation_id, update_compute_capacity_reservation_details, work_request_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.update_compute_capacity_reservation` and waits for the oci.work_requests.models.WorkRequest to enter the given state(s). :param str capacity_reservation_id: (required) The OCID of the compute capacity reservation. :param oci.core.models.UpdateComputeCapacityReservationDetails update_compute_capacity_reservation_details: (required) Update compute capacity reservation details. 
:param list[str] work_request_states: (optional) An array of work requests states to wait on. These should be valid values for :py:attr:`~oci.work_requests.models.WorkRequest.status` Default values are termination states: [STATUS_SUCCEEDED, STATUS_FAILED, STATUS_CANCELED] :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.update_compute_capacity_reservation` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.update_compute_capacity_reservation(capacity_reservation_id, update_compute_capacity_reservation_details, **operation_kwargs) work_request_states = work_request_states if work_request_states else oci.waiter._WORK_REQUEST_TERMINATION_STATES lowered_work_request_states = [w.lower() for w in work_request_states] work_request_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self._work_request_client, self._work_request_client.get_work_request(work_request_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_work_request_states, **waiter_kwargs ) return waiter_result except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def update_console_history_and_wait_for_state(self, instance_console_history_id, update_console_history_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.update_console_history` and waits for the :py:class:`~oci.core.models.ConsoleHistory` acted upon to enter the given state(s). :param str instance_console_history_id: (required) The OCID of the console history. 
:param oci.core.models.UpdateConsoleHistoryDetails update_console_history_details: (required) Update instance fields :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.ConsoleHistory.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.update_console_history` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.update_console_history(instance_console_history_id, update_console_history_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_console_history(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def update_dedicated_vm_host_and_wait_for_state(self, dedicated_vm_host_id, update_dedicated_vm_host_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.update_dedicated_vm_host` and waits for the :py:class:`~oci.core.models.DedicatedVmHost` acted upon to enter the given state(s). :param str dedicated_vm_host_id: (required) The OCID of the dedicated VM host. 
:param oci.core.models.UpdateDedicatedVmHostDetails update_dedicated_vm_host_details: (required) Update dedicated VM host details :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.DedicatedVmHost.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.update_dedicated_vm_host` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.update_dedicated_vm_host(dedicated_vm_host_id, update_dedicated_vm_host_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_dedicated_vm_host(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def update_image_and_wait_for_state(self, image_id, update_image_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.update_image` and waits for the :py:class:`~oci.core.models.Image` acted upon to enter the given state(s). :param str image_id: (required) The `OCID`__ of the image. 
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm :param oci.core.models.UpdateImageDetails update_image_details: (required) Updates the image display name field. Avoid entering confidential information. :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.Image.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.update_image` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.update_image(image_id, update_image_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_image(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def update_instance_and_wait_for_work_request(self, instance_id, update_instance_details, work_request_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.update_instance` and waits for the oci.work_requests.models.WorkRequest to enter the given state(s). :param str instance_id: (required) The `OCID`__ of the instance. 
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm :param oci.core.models.UpdateInstanceDetails update_instance_details: (required) Update instance fields :param list[str] work_request_states: (optional) An array of work requests states to wait on. These should be valid values for :py:attr:`~oci.work_requests.models.WorkRequest.status` Default values are termination states: [STATUS_SUCCEEDED, STATUS_FAILED, STATUS_CANCELED] :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.update_instance` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.update_instance(instance_id, update_instance_details, **operation_kwargs) work_request_states = work_request_states if work_request_states else oci.waiter._WORK_REQUEST_TERMINATION_STATES lowered_work_request_states = [w.lower() for w in work_request_states] work_request_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self._work_request_client, self._work_request_client.get_work_request(work_request_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_work_request_states, **waiter_kwargs ) return waiter_result except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def update_instance_and_wait_for_state(self, instance_id, update_instance_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.update_instance` and waits for the :py:class:`~oci.core.models.Instance` acted upon to enter the given state(s). 
:param str instance_id: (required) The `OCID`__ of the instance. __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm :param oci.core.models.UpdateInstanceDetails update_instance_details: (required) Update instance fields :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.Instance.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.update_instance` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.update_instance(instance_id, update_instance_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_instance(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def update_instance_console_connection_and_wait_for_state(self, instance_console_connection_id, update_instance_console_connection_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.update_instance_console_connection` and waits for the :py:class:`~oci.core.models.InstanceConsoleConnection` acted upon to enter the given state(s). 
:param str instance_console_connection_id: (required) The OCID of the instance console connection. :param oci.core.models.UpdateInstanceConsoleConnectionDetails update_instance_console_connection_details: (required) Update instanceConsoleConnection tags :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.InstanceConsoleConnection.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.update_instance_console_connection` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.update_instance_console_connection(instance_console_connection_id, update_instance_console_connection_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_instance_console_connection(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def update_volume_attachment_and_wait_for_state(self, volume_attachment_id, update_volume_attachment_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.core.ComputeClient.update_volume_attachment` and waits for the :py:class:`~oci.core.models.VolumeAttachment` acted upon to enter the given state(s). 
:param str volume_attachment_id: (required) The OCID of the volume attachment. :param oci.core.models.UpdateVolumeAttachmentDetails update_volume_attachment_details: (required) Update information about the specified volume attachment. :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeAttachment.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.core.ComputeClient.update_volume_attachment` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.update_volume_attachment(volume_attachment_id, update_volume_attachment_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_volume_attachment(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
#!/usr/bin/env python
#
# Translational stop site (TLSS) distance.
#
# For each read (BED file), determine the distance to the nearest 3' UTR
# translational stop site.  Input is a BED file containing all 3' UTRs;
# the distance is measured from the TLSS (the 5' edge of the UTR on '+'
# strand genes, the 3' edge on '-' strand genes).
#
# Matches before the TLSS are negative, matches within the 3' UTR are
# positive; a read overlapping the TLSS itself reports distance 0.

import sys

# Genomic coordinates are bucketed into bins of this size so gene lookups
# only scan features near a read instead of the whole genome.
BINSIZE = 10000


def parse_bed_line(line):
    """Parse one BED line into (chrom, start, end, name, strand).

    Returns None for blank lines and '#' comment lines.  The name column
    is returned verbatim (callers decide whether to strip '/suffixes').
    """
    if not line.strip() or line[0] == '#':
        return None
    cols = line.strip().split('\t')
    return cols[0], int(cols[1]), int(cols[2]), cols[3], cols[5]


def load_tlss(lines):
    """Map gene name -> list of (start, end) 3' UTR intervals.

    Fix: the original appended the same interval once per genomic bin it
    spanned, creating duplicate entries — this dict is keyed by gene name,
    not by bin, so a single append per BED record is correct.
    """
    tlss = {}
    for line in lines:
        parsed = parse_bed_line(line)
        if parsed is None:
            continue
        chrom, start, end, name, strand = parsed
        name = name.split('/')[0]
        tlss.setdefault(name, []).append((start, end))
    return tlss


def load_genes(lines):
    """Map (chrom, strand, bin) -> list of (start, end, name) gene spans.

    A gene spanning several bins is registered under each bin so reads can
    find it from any overlapping bin.
    """
    genes = {}
    for line in lines:
        parsed = parse_bed_line(line)
        if parsed is None:
            continue
        chrom, start, end, name, strand = parsed
        name = name.split('/')[0]
        for i in range(start // BINSIZE, end // BINSIZE + 1):
            genes.setdefault((chrom, strand, i), []).append((start, end, name))
    return genes


def best_tlss_distance(chrom, strand, start, end, genes, tlss, max_binspan=4):
    """Distance from read [start, end] to the nearest TLSS of an overlapping gene.

    Searches progressively wider bin windows (1..max_binspan) until a gene
    on the same chrom/strand overlaps the read.  Returns a tuple
    (abs_distance, upstream) where upstream=True means the read lies before
    the TLSS (reported downstream as a negative distance), or None when no
    overlapping gene with a TLSS is found.
    """
    best_dist = None
    best_upstream = False
    found_gene = False
    binspan = 0
    while not found_gene and binspan < max_binspan:
        binspan += 1
        startbin = start // BINSIZE - binspan
        endbin = end // BINSIZE + binspan
        for i in range(startbin, endbin + 1):
            for gene_start, gene_end, gene_name in genes.get((chrom, strand, i), []):
                if not (gene_start <= start <= gene_end or
                        gene_start <= end <= gene_end):
                    continue
                found_gene = True
                for utr_start, utr_end in tlss.get(gene_name, []):
                    if strand == '+':
                        # TLSS is the 5' (start) edge of the UTR on '+'.
                        if start <= utr_start <= end:
                            dist = 0
                        else:
                            dist = start - utr_start
                            if dist < 0:
                                dist = end - utr_start
                    else:
                        # TLSS is the 3' (end) edge of the UTR on '-'.
                        if start <= utr_end <= end:
                            dist = 0
                        else:
                            dist = utr_end - end
                            if dist < 0:
                                dist = utr_end - start
                    if best_dist is None or abs(dist) < best_dist:
                        best_dist = abs(dist)
                        best_upstream = dist < 0
    if best_dist is None:
        return None
    return best_dist, best_upstream


def main(argv):
    """Load TLSS + gene BED files, then report a distance for each read."""
    if len(argv) != 4:
        sys.stderr.write('Error - missing arguments!\nUsage: %s tlss.bed genes.bed reads.bed\n\n' % argv[0])
        sys.exit(1)

    with open(argv[1]) as f:
        tlss = load_tlss(f)
    with open(argv[2]) as f:
        genes = load_genes(f)

    with open(argv[3]) as f:
        for line in f:
            parsed = parse_bed_line(line)
            if parsed is None:
                continue
            chrom, start, end, name, strand = parsed
            result = best_tlss_distance(chrom, strand, start, end, genes, tlss)
            if result is not None:
                dist, upstream = result
                print('%s\tTLSS\t%s%s' % (name, '-' if upstream else '', dist))


if __name__ == '__main__':
    main(sys.argv)
from utils.utils import write_to_video, make_dir
import cv2
import json
import argparse
import os
import glob
import numpy as np


def video_generator(video_dir, frame_height=64, frame_width=64, frame_channels=3,
                    batch_size=1, time=10, camera_fps=2, crop_height=224,
                    crop_width=112, json_files="dataset_json",
                    json_filename="kinetics_train.json"):
    """Endlessly yield (current_frames, next_frames) training batches.

    Walks every folder in `video_dir` matching '*_act_14_*', reading frames
    named "<key>_NNNNNN.jpg" starting at 1 and stepping by 5.  Each frame is
    cropped (top `crop_height` rows, horizontally centred `crop_width`
    columns), resized to (frame_width, frame_height) and scaled to [0, 1].

    Yields two arrays of shape (batch_size, time, H, W, C) where
    next_frames is current_frames shifted one step ahead in time.

    Note: `json_files`/`json_filename` and `camera_fps` are accepted for
    interface compatibility but unused by the current frame-folder loader.
    """
    while True:
        video_folders = glob.glob(os.path.join(video_dir, '*_act_14_*'))

        current_frames = np.empty((batch_size, time, frame_height, frame_width, frame_channels))
        next_frames = np.empty_like(current_frames)
        batch_count = 0
        # Greyscale decode when a single channel is requested.
        color = cv2.IMREAD_GRAYSCALE if frame_channels == 1 else cv2.IMREAD_COLOR

        for video_folder in video_folders:
            key = os.path.basename(video_folder)
            i = 0
            key_frame_id = 1
            img_filename = os.path.join(video_folder, "{}_{:06d}.jpg".format(key, key_frame_id))
            img = cv2.imread(img_filename, color)
            while img is not None:
                width = img.shape[1]
                # Crop: keep the top crop_height rows, centre horizontally.
                width_start = (width - crop_width) // 2
                cropped_frame = img[0:crop_height, width_start:width_start + crop_width]
                resized_frame = cv2.resize(cropped_frame, (frame_width, frame_height)) / 255.
                if frame_channels == 1:
                    resized_frame = np.expand_dims(resized_frame, axis=2)

                # Fill a sliding window of `time + 1` frames: frame i goes
                # into current[i] (for i < time) and next[i - 1] (for i > 0).
                if i == 0:
                    current_frames[batch_count, i] = resized_frame
                elif i < time:
                    current_frames[batch_count, i] = resized_frame
                    next_frames[batch_count, i - 1] = resized_frame
                else:
                    next_frames[batch_count, i - 1] = resized_frame

                if i == time:
                    # Window complete: move to the next sample in the batch
                    # (i resets to 0 after the increment below).
                    i = -1
                    batch_count = batch_count + 1
                    if batch_count % batch_size == 0:
                        yield current_frames, next_frames
                        batch_count = 0
                        current_frames = np.empty_like(current_frames)
                        next_frames = np.empty_like(next_frames)
                i += 1

                key_frame_id += 5
                img_filename = os.path.join(video_folder, "{}_{:06d}.jpg".format(key, key_frame_id))
                img = cv2.imread(img_filename, color)


def discriminator_loader(video_generator, latent_dim=8, seed=0, time_init=10):
    """Yield discriminator inputs/targets from a (current, next) generator.

    Inputs: first frame, the first `time_init` current frames, next frames
    (real), current frames (fake source) and a latent noise sample.
    Targets: real/real/fake/fake labels.
    """
    rng = np.random.RandomState(seed)
    while True:
        x, y = next(video_generator)
        batch_size = y.shape[0]
        time = y.shape[1]
        z_p = rng.normal(size=(batch_size, time, latent_dim))
        y_real = np.ones((batch_size), dtype='float32')
        y_fake = np.zeros((batch_size), dtype='float32')
        yield [x[:, 0], x[:, 0:time_init], y, x, z_p], [y_real, y_real, y_fake, y_fake]


def discriminator_data(x, y, latent_dim=8, seed=0, time_init=10):
    """Single-batch version of discriminator_loader (fresh RNG per call)."""
    rng = np.random.RandomState(seed)
    batch_size = y.shape[0]
    time = y.shape[1]
    z_p = rng.normal(size=(batch_size, time, latent_dim))
    y_real = np.ones((batch_size), dtype='float32')
    y_fake = np.zeros((batch_size), dtype='float32')
    return [x[:, 0], x[:, 0:time_init], y, x, z_p], [y_real, y_real, y_fake, y_fake]


def generator_loader(video_generator, latent_dim=8, seed=0, eval=False, time_init=10):
    """Yield generator-side training tuples; targets are all-ones labels
    plus the ground-truth next frames.

    Note: `eval` is accepted for interface compatibility but unused.
    """
    rng = np.random.RandomState(seed)
    while True:
        x, y = next(video_generator)
        batch_size = y.shape[0]
        time = y.shape[1]
        z_p = rng.normal(size=(batch_size, time, latent_dim))
        y_real = np.ones((batch_size), dtype='float32')
        yield [x[:, 0], x[:, 0:time_init], y, x, z_p], [y_real, y_real, y]


def generator_data(x, y, latent_dim=8, seed=0, time_init=10):
    """Single-batch version of generator_loader (fresh RNG per call)."""
    rng = np.random.RandomState(seed)
    batch_size = y.shape[0]
    time = y.shape[1]
    z_p = rng.normal(size=(batch_size, time, latent_dim))
    y_real = np.ones((batch_size), dtype='float32')
    return [x[:, 0], x[:, 0:time_init], y, x, z_p], [y_real, y_real, y]


def vaegan_loader(video_generator, latent_dim=8, seed=0, time_init=10):
    """Yield VAE-GAN tuples: (first frame, current frames, noise) -> next frames."""
    rng = np.random.RandomState(seed)
    while True:
        x, y = next(video_generator)
        batch_size = y.shape[0]
        time = y.shape[1]
        z_p = rng.normal(size=(batch_size, time, latent_dim))
        yield [x[:, 0], x, z_p], y


def encoder_loader(video_generator, latent_dim=8, seed=0, time_init=10):
    """Yield encoder tuples; note the noise target is 2*latent_dim wide
    (presumably mean + log-variance — confirm against the encoder model)."""
    rng = np.random.RandomState(seed)
    while True:
        x, y = next(video_generator)
        batch_size = y.shape[0]
        time = y.shape[1]
        z_p = rng.normal(size=(batch_size, time, latent_dim * 2))
        yield [x[:, 0], x[:, 0:time_init], x, y], [z_p, y]


def encoder_data(x, y, latent_dim=8, seed=0, time_init=10):
    """Single-batch version of encoder_loader (fresh RNG per call)."""
    rng = np.random.RandomState(seed)
    batch_size = y.shape[0]
    time = y.shape[1]
    z_p = rng.normal(size=(batch_size, time, latent_dim * 2))
    return [x[:, 0], x[:, 0:time_init], x, y], [z_p, y]


def download_videos(directory_json, output_dir, label):
    """Download every video tagged `label` listed in *.json manifests.

    Each manifest maps video keys to {'annotations': {'label': ...},
    'url': ...}; matching videos are saved as <output_dir>/<manifest>/<key>.mp4.
    """
    # Fix: youtube_dl was referenced without ever being imported at module
    # level; imported lazily so the rest of the module works without it.
    import youtube_dl

    json_files = glob.glob(os.path.join(directory_json, '*.json'))
    for json_file in json_files:
        folder_name = os.path.join(output_dir, os.path.split(json_file)[1].split('.')[0])
        with open(json_file) as f:
            data = json.load(f)
        for key in data.keys():
            if data[key]['annotations']['label'] != label:
                continue
            output_file = os.path.join(folder_name, key + '.mp4')
            video_url = data[key]['url']
            ydl_opts = {
                'outtmpl': output_file,
                'format': '160'
            }
            ydl = youtube_dl.YoutubeDL(ydl_opts)
            try:
                ydl.extract_info(video_url, download=True)
            except Exception:
                # Best effort: private/removed videos are skipped, not fatal.
                print('Unable to download: ', video_url)
                continue


def crop_videos(dataset_json, input_dir, output_dir, dataset="kinetics_train"):
    """Trim each <dataset>/*.mp4 to its annotated time segment, save as .avi.

    The segment [start, end] in seconds comes from <dataset_json>/<dataset>.json
    under data[key]['annotations']['segment'].
    """
    json_file = os.path.join(dataset_json, dataset + ".json")
    with open(json_file) as f:
        data = json.load(f)
    video_file_dir = os.path.join(input_dir, dataset, "*.mp4")
    video_files = glob.glob(video_file_dir)
    print(video_file_dir)
    for video in video_files:
        print("Reading ", video)
        video_key = os.path.basename(video).split(".")[0]
        segment = data[video_key]["annotations"]["segment"]
        cap = cv2.VideoCapture(video)
        video_fps = cap.get(cv2.CAP_PROP_FPS)
        video_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        video_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        # Number of frames the annotated segment spans.
        n_frames = int((segment[1] - segment[0]) * video_fps)
        frames = np.empty((n_frames, video_height, video_width, 3))
        frame_count = -1
        j = 0
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            frame_count += 1
            current_time = frame_count / video_fps
            if current_time < segment[0]:
                # Still before the annotated segment.
                continue
            if j == n_frames:
                print("Breaking")
                break
            frames[j] = frame
            j += 1
        save_dir = os.path.join(output_dir, dataset)
        make_dir(save_dir)
        output_file = os.path.join(save_dir, video_key + ".avi")
        write_to_video(frames, output_file, video_height, video_width, video_fps)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Download videos from youtube')
    parser.add_argument('-d', '--dataset_json', type=str, default='dataset_json')
    parser.add_argument('-o', '--output_dir', type=str,
                        default='/media/fjbriones/Vorcha/Datasets/Kinetics-600')
    parser.add_argument('-ol', '--output_dir_local', type=str, default='data')
    parser.add_argument('-l', '--label', type=str, default='news anchoring')
    args = parser.parse_args()

    # download_videos(args.dataset_json, args.output_dir, args.label)
    crop_videos(args.dataset_json, args.output_dir, args.output_dir_local)
    crop_videos(args.dataset_json, args.output_dir, args.output_dir_local, dataset="kinetics_val")
    crop_videos(args.dataset_json, args.output_dir, args.output_dir_local, dataset="kinetics_test")
import math def get_slabs(): slabs_list=[0]+[int(i) for i in input().split()]+[math.inf] percent=[0]+[int(i) for i in input().split()] slabs=[] for i in range(len(slabs_list)-1): val=(slabs_list[i+1]-slabs_list[i])*percent[i]/100 slabs.append((slabs_list[i],slabs_list[i+1],percent[i],val)) return slabs slabs=get_slabs() rebate=int(input()) tax_paid=[int(i) for i in input().split()] final_ans=0 for employee_paid in tax_paid: ans=rebate for slab in slabs: if employee_paid - slab[3] == 0 : ans+=slab[0]+employee_paid*100/slab[2] break elif slab[3] == math.inf : ans+=slab[0]+employee_paid*100/slab[2] break elif employee_paid - slab[3] > 0 : employee_paid-=slab[3] else: ans+=slab[0]+employee_paid*100/slab[2] break final_ans+=ans print(int(final_ans))
from __future__ import print_function
from string import Template as TStr
import logging
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request


class GSheet:
    """Dict-like wrapper over one Google Sheet mapping item names to quantities.

    Rows are expected to hold three columns: name, quantity, unit.
    Supports ``item in sheet``, ``sheet[item]`` and ``sheet[item] = qty``.
    """

    # If modifying these scopes, delete the file token.pickle.
    SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
    # The ID and range of a sample spreadsheet.
    # noinspection SpellCheckingInspection
    SPREADSHEET_ID = '1J3EZrysnfOrE6XVLJodydwPXQbHvYszzaZ67CFB9E5w'
    RANGE_NAME = 'At Work!A2:C'

    def __init__(self):
        """Authorize against the Sheets API and cache the values endpoint.

        Shows basic usage of the Sheets API; prints values from a sample
        spreadsheet.
        Source: https://developers.google.com/sheets/api/quickstart/python
        """
        creds = None
        # The file token.pickle stores the user's access and refresh tokens,
        # and is created automatically when the authorization flow completes
        # for the first time.
        if os.path.exists('token.pickle'):
            with open('token.pickle', 'rb') as token:
                creds = pickle.load(token)
        # If there are no (valid) credentials available, let the user log in.
        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(
                    'credentials.json', self.SCOPES)
                creds = flow.run_local_server(port=0)
            # Save the credentials for the next run
            with open('token.pickle', 'wb') as token:
                pickle.dump(creds, token)

        # build(..) sends an API call
        service = build('sheets', 'v4', credentials=creds)
        self.data_fetcher = service.spreadsheets().values()

    def fetch_all_rows(self):
        """Return all rows in RANGE_NAME as a list of lists (may be empty)."""
        # Call the Sheets API; .execute() sends the API call.
        result = self.data_fetcher.get(spreadsheetId=self.SPREADSHEET_ID,
                                       range=self.RANGE_NAME).execute()
        return result.get('values', [])

    def _find_indexed_item_row(self, item_name: str):
        """Return (row_index, row) for the first row whose name column starts
        with `item_name` (case-insensitive), or None if absent."""
        values = self.fetch_all_rows()
        if not values:
            return None
        match_idx_rows = [(i, r) for i, r in enumerate(values)
                          if str(r[0]).lower().startswith(item_name.lower())]
        if not match_idx_rows:
            return None
        indexed_item_row = match_idx_rows[0]
        # item_index = 0..MAX_ROW_INDEX
        # item_row: item_name, item_quantity, item_quantity_unit
        if len(match_idx_rows) > 1:
            logging.warning("Found %d matches; only returning first: %s",
                            len(match_idx_rows), indexed_item_row)
        return indexed_item_row

    def find_item_row(self, item_name: str):
        """Return the matching row (without its index), or None."""
        index_item_row = self._find_indexed_item_row(item_name)
        if not index_item_row:
            return None
        (index, item_row) = index_item_row
        return item_row

    def contains_positive(self, item_name: str) -> bool:
        """True when the item exists and its quantity column is > 0."""
        item_row = self.find_item_row(item_name)
        if not item_row:
            logging.warning("Item not found in sheet: %s", item_name)
            return False
        # Fix: the three-way unpack below needs 3 columns; the original
        # checked `len(item_row) < 2` and would raise ValueError on 2-column
        # rows despite its own warning text.
        if len(item_row) < 3:
            logging.warning("Row does not contain at least three columns: %s", item_row)
            return False
        [_, item_quantity, _] = item_row
        return float(item_quantity) > 0

    def __contains__(self, item):
        return self.contains_positive(item)

    def fetch_item_quantity(self, item_name: str) -> float:
        """Quantity column of the item, or 0.0 when missing/malformed."""
        item_row = self.find_item_row(item_name)
        if not item_row:
            return 0.0
        if len(item_row) < 3:
            logging.warning("Row does not contain at least three columns: %s", item_row)
            return 0.0
        [_, item_quantity, _] = item_row
        return float(item_quantity)

    def __getitem__(self, item):
        return self.fetch_item_quantity(item)

    def send_item_quantity(self, item_name: str, quantity: float):
        """Write `quantity` into the item's quantity cell (column B).

        Returns True when exactly one cell was updated, False if the item
        was not found.
        """
        indexed_item_row = self._find_indexed_item_row(item_name)
        if not indexed_item_row:
            logging.warning("Item not found in Google Spreadsheet: %s", item_name)
            return False
        (index, item_row) = indexed_item_row
        # +1 for 1-based sheet rows, +1 because RANGE_NAME starts at row 2.
        coord = "B" + str(index + 1 + 1)
        body = {
            "range": coord,
            "values": [[quantity]]
        }
        response = self.data_fetcher.update(spreadsheetId=self.SPREADSHEET_ID,
                                            range=coord,
                                            body=body,
                                            valueInputOption="RAW").execute()
        return response.get('updatedCells') == 1

    def __setitem__(self, key, value):
        self.send_item_quantity(str(key), float(value))

    def __str__(self):
        # NOTE: assumes every row has exactly three columns; rows with fewer
        # would raise here, as in the original.
        values = self.fetch_all_rows()
        lines = []
        for row in values:
            [name, qty, unit] = row
            lines.append(TStr("$item: $qty $unit")
                         .substitute(item=name, qty=qty, unit=unit))
        return "\n".join(lines)


if __name__ == "__main__":
    # Manual smoke test: read a few items, round-trip an update.
    gs = GSheet()
    print(gs)
    print()
    print(TStr("Bananas: $qty").substitute(qty=gs.fetch_item_quantity("Banana")))
    print(TStr("Tuna cans: $qty")
          .substitute(qty=gs.fetch_item_quantity("Tuna cans")))
    print(TStr("Nonexistent item: $qty")
          .substitute(qty=gs.fetch_item_quantity("Nonexistent item")))
    dairy_milk_name = "Dairy milk"
    orig_dairy_milk_qty = gs.fetch_item_quantity(dairy_milk_name)
    new_dairy_milk_qty = 9.9
    print(TStr("$name: $qty")
          .substitute(name=dairy_milk_name, qty=orig_dairy_milk_qty))
    print(TStr("Updating $name to $qty...")
          .substitute(name=dairy_milk_name, qty=new_dairy_milk_qty))
    gs.send_item_quantity("Dairy milk", new_dairy_milk_qty)
    print(TStr("$name: $qty")
          .substitute(name=dairy_milk_name,
                      qty=gs.fetch_item_quantity(dairy_milk_name)))
    print(TStr("Updating $name to $qty...")
          .substitute(name=dairy_milk_name, qty=orig_dairy_milk_qty))
    gs.send_item_quantity("Dairy milk", orig_dairy_milk_qty)
    print(TStr("$name: $qty")
          .substitute(name=dairy_milk_name,
                      qty=gs.fetch_item_quantity(dairy_milk_name)))
import glob
import math
import numpy as np
import os
import fnmatch
import shutil
import sys


def round_up(x, y):
    """Return ceil(x / y) as an int (true-division ceiling)."""
    return int(math.ceil(float(x) / float(y)))


def add_parameter(class_object, kwargs, parameter, default=None):
    """Set `parameter` on `class_object` from kwargs, else to `default`."""
    if parameter in kwargs:
        setattr(class_object, parameter, kwargs.get(parameter))
    else:
        setattr(class_object, parameter, default)


def additional_kwargs(class_object, kwargs):
    """Return the subset of kwargs that are NOT attributes of class_object."""
    output_kwargs = {}
    for parameter in kwargs:
        if not hasattr(class_object, parameter):
            output_kwargs[parameter] = kwargs[parameter]
    return output_kwargs


def rot90(array, n=1, axis=2):
    """Rotate an array by 90 degrees in the counter-clockwise direction
    around the given axis.

    Nabbed from https://stackoverflow.com/questions/33190042/how-to-calculate-all-24-rotations-of-3d-array
    """
    # Swap the target axis into position 2, rotate in that plane, swap back.
    array = np.swapaxes(array, 2, axis)
    array = np.rot90(array, n)
    array = np.swapaxes(array, 2, axis)
    return array


def grab_files_recursive(input_directory, regex='*', return_dir=False, return_file=True, recursive=True):
    """ Returns all files recursively in a directory. Essentially a
        convenience wrapper around os.walk.

        Parameters
        ----------
        input_directory: str
            The folder to search.
        regex: str
            A linux-style pattern to match.
        return_dir: bool
            Also include matching directory names.
        return_file: bool
            Include matching file names.
        recursive: bool
            Walk subdirectories; when False, a flat glob is used instead.

        Returns
        -------
        output_list: list
            A list of found files.
    """
    output_list = []

    if recursive:
        for root, subFolders, files in os.walk(input_directory):
            if return_dir:
                for subFolder in subFolders:
                    if fnmatch.fnmatch(subFolder, regex):
                        output_list += [os.path.join(root, subFolder)]
            if return_file:
                for file in files:
                    if fnmatch.fnmatch(file, regex):
                        output_list += [os.path.join(root, file)]
    else:
        output_list = glob.glob(os.path.join(input_directory, regex))

    return output_list


def nifti_splitext(input_filepath):
    """ os.path.splitext splits at the LAST period, which mangles multi-part
        extensions such as .nii.gz that should split at the FIRST period.
        This performs an alternate splitext which does just that.

        TODO: Make work if someone includes a period in a folder name (ugh).

        Parameters
        ----------
        input_filepath: str
            The filepath to split.

        Returns
        -------
        split_filepath: list of str
            A two-item list, split at the first period in the filepath.
            NOTE: when the basename contains no period, a one-item list is
            returned instead (preserved from the original behaviour).
    """
    input_filepath = str(input_filepath)
    path_split = str.split(input_filepath, os.sep)
    basename = path_split[-1]
    split_filepath = str.split(basename, '.')
    if len(split_filepath) <= 1:
        return split_filepath
    else:
        return [os.path.join(os.sep.join(path_split[0:-1]), split_filepath[0]),
                '.' + '.'.join(split_filepath[1:])]


def replace_extension(input_filepath, extension):
    """Convenience function to safely switch out an extension.

        Everything after the FIRST period in the basename is replaced.

        Parameters
        ----------
        input_filepath : str
            Filepath to switch extensions on.
        extension : str
            Extension to switch in (including the leading dot).

        Returns
        -------
        str
            Filepath with switched extension.
    """
    input_filepath = os.path.abspath(input_filepath)
    basename = os.path.basename(input_filepath)
    pre_extension = str.split(basename, '.')[0]
    return os.path.join(os.path.dirname(input_filepath), pre_extension + extension)


def replace_suffix(input_filepath, input_suffix, output_suffix, suffix_delimiter=None, file_extension=None):
    """ Replaces an input_suffix in a filename with an output_suffix. Can be
        used to generate or remove suffixes by leaving one or the other
        option blank.

        Parameters
        ----------
        input_filepath: str
            The filename to be transformed.
        input_suffix: str
            The suffix to be replaced.
        output_suffix: str
            The suffix to replace with.
        suffix_delimiter: str
            Optional, overrides input_suffix. Replaces whatever comes after
            suffix_delimiter with output_suffix.
        file_extension: str
            Optional extension to force on the output.

        Returns
        -------
        output_filepath: str
            The transformed filename, or [] when input_suffix is absent.
    """
    # Temp code to deal with directories, TODO
    if os.path.isdir(input_filepath):
        # NOTE(review): raises TypeError when file_extension is None —
        # preserved from the original; confirm intended usage.
        output_filepath = input_filepath + output_suffix + file_extension
        return output_filepath

    if '.' in os.path.basename(input_filepath):
        split_filename = nifti_splitext(input_filepath)
    else:
        split_filename = [input_filepath, '']

    if suffix_delimiter is not None:
        input_suffix = str.split(split_filename[0], suffix_delimiter)[-1]

    if input_suffix not in os.path.basename(input_filepath):
        print(('ERROR!', input_suffix, 'not in input_filepath.'))
        return []

    if input_suffix == '':
        prefix = split_filename[0]
    else:
        prefix = input_suffix.join(str.split(split_filename[0], input_suffix)[0:-1])
    prefix = prefix + output_suffix
    output_filepath = prefix + split_filename[1]

    # Fix: the original called replace_extension here but discarded the
    # result, so file_extension silently never took effect.
    if file_extension is not None:
        output_filepath = replace_extension(output_filepath, file_extension)

    return output_filepath


def make_dir(input_directory):
    """ Convenience function that adds os.path.exists to os.makedirs """
    if not os.path.exists(input_directory):
        os.makedirs(input_directory)


def quotes(input_string):
    """ Some command line functions require filepaths with spaces in them
        to be in quotes.
    """
    return '"' + input_string + '"'


def cli_sanitize(input_filepath, save=False, delete=False):
    """ Copies out a filename without spaces, or deletes that file. Will
        not work for directories with spaces in their names.
    """
    input_filepath = os.path.abspath(input_filepath)
    new_filepath = os.path.join(os.path.dirname(input_filepath),
                                os.path.basename(input_filepath).replace(' ', '__'))
    if delete:
        os.remove(new_filepath)
    if save:
        # Fix: os.copy does not exist; shutil.copy is the correct call.
        shutil.copy(input_filepath, new_filepath)
    return new_filepath


def docker_print(*args):
    """ Docker doesn't flush stdout in some circumstances, so one needs to
        do so manually.

        Parameters
        ----------
        *args
            Print parameters
    """
    # Fix: print(args) printed the tuple repr; unpack to print the values.
    print(*args)
    sys.stdout.flush()
#!/usr/bin/python #-*- coding: utf-8 -*- import socket def main(): HOST = '212.47.229.1' PORT = 33003 tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM) dest = (HOST, PORT) tcp.connect(dest) data = tcp.recv(2048) while "FLAG" not in data: result = 0 print(data) valores = data.split(b"[>]")[1].split(b"\n")[0].split(b" ") op = valores[2] if op == b"XOR": result = int(valores[1]) ^ int(valores[3]) elif op == b"OR": result = int(valores[1]) | int(valores[3]) elif op == b"AND": result = int(valores[1]) & int(valores[3]) else: print(op) print(result) tcp.send(str(result) + b"\n") data = tcp.recv(2048) print(data) if __name__ == "__main__": main()
from django.shortcuts import render from molo.profiles.admin import FrontendUsersModelAdmin, UserProfileModelAdmin from molo.profiles.models import ( UserProfilesSettings, UserProfile, SecurityAnswer) from wagtail.contrib.modeladmin.options import modeladmin_register from wagtail.admin.site_summary import SummaryItem from wagtail.core import hooks class ProfileWarningMessagee(SummaryItem): order = 100 template = 'admin/profile_warning_message.html' @hooks.register('construct_homepage_panels') def profile_warning_message(request, panels): profile_settings = UserProfilesSettings.for_site(request.site) if not profile_settings.country_code and \ profile_settings.show_mobile_number_field: panels[:] = [ProfileWarningMessagee(request)] modeladmin_register(FrontendUsersModelAdmin) modeladmin_register(UserProfileModelAdmin) class AccessErrorMessage(SummaryItem): order = 100 template = 'wagtail/access_error_message.html' @hooks.register('construct_homepage_panels') def add_access_error_message_panel(request, panels): if UserProfile.objects.filter(user=request.user).exists() and \ not request.user.is_superuser: if not request.user.profile.admin_sites.filter( pk=request.site.pk).exists(): panels[:] = [AccessErrorMessage(request)] @hooks.register('before_delete_page') def before_delete_security_question(request, page): if SecurityAnswer.objects.filter(question_id=page.id): return render( request, 'admin/security_question_delete_warrning.html', { 'page': page, 'parent_id': page.get_parent().id })
# The port SendGrid will upload events to. REPOSTER_PORT = 12345 # Where to repost them? SITE_URLS = { 'default': 'http://localhost:10002', 'dev-marcink': 'http://localhost:10003', }
""" Shared utility functions """ from boto.datapipeline import regions from boto.datapipeline.layer1 import DataPipelineConnection from time import sleep import dateutil.parser from dataduct.config import Config config = Config() REGION = config.etl.get('REGION', None) DP_ACTUAL_END_TIME = '@actualEndTime' DP_ATTEMPT_COUNT_KEY = '@attemptCount' DP_INSTANCE_ID_KEY = 'id' DP_INSTANCE_STATUS_KEY = '@status' def _update_sleep_time(last_time=None): """Expotentially decay sleep times between calls incase of failures Note: Start time for sleep is 5 and the max is 60 Args: last_time(int): time used in the last iteration Returns: next_time(int): time to sleep in the next iteration of the code """ start_sleep_time = 5 max_sleep_time = 60 if last_time is None: return start_sleep_time return min(last_time * 2, max_sleep_time) def get_response_from_boto(fn, *args, **kwargs): """Expotentially decay sleep times between calls incase of failures Note: If there is a rate limit error, sleep until the error goes away Args: func(function): Function to call args(optional): arguments kwargs(optional): keyword arguments Returns: response(json): request response. Input: func(function): Function to call args(optional): arguments kwargs(optional): keyword arguments """ response = None sleep_time = None while response is None: try: response = fn(*args, **kwargs) except Exception, error: if error.error_code != 'ThrottlingException': raise else: sleep_time = _update_sleep_time(sleep_time) print "Rate limit exceeded. Sleeping %d seconds." 
% sleep_time sleep(sleep_time) return response def get_list_from_boto(func, response_key, *args, **kwargs): """Get a paginated list from boto Args: func(function): Function to call response_key(str): Key which points to a list *args(optional): arguments **kwargs(optional): keyword arguments Returns: results(list): Aggregated list of items indicated by the response key """ results = [] has_more_results, marker = True, None while has_more_results: kwargs['marker'] = marker response = get_response_from_boto(func, *args, **kwargs) has_more_results = response['hasMoreResults'] if has_more_results: marker = response['marker'] results.extend(response[response_key]) return results def list_pipeline_instances(pipeline_id, conn=None, increment=25): """List details of all the pipeline instances Args: pipeline_id(str): id of the pipeline conn(DataPipelineConnection): boto connection to datapipeline increment(int): rate of increments in API calls Returns: instances(list): list of pipeline instances """ if conn is None: conn = get_datapipeline_connection() # Get all instances instance_ids = sorted(get_list_from_boto(conn.query_objects, 'ids', pipeline_id, 'INSTANCE')) # Collect all instance details instances = [] for start in range(0, len(instance_ids), increment): # Describe objects in batches as API is rate limited response = get_response_from_boto( conn.describe_objects, instance_ids[start:start + increment], pipeline_id, ) for pipeline_object in response['pipelineObjects']: pipeline_dict = dict( ( sub_dict['key'], sub_dict.get('stringValue', sub_dict.get('refValue', None)) ) for sub_dict in pipeline_object['fields'] ) pipeline_dict['id'] = pipeline_object['id'] # Append to all instances instances.append(pipeline_dict) return instances def get_datapipeline_connection(): """Get boto connection of AWS data pipeline Returns: DataPipelineConnection: boto connection """ region = next((x for x in regions() if x.name == str(REGION).lower()), None) conn = 
DataPipelineConnection(region=region) return conn def list_pipelines(conn=None): """Fetch a list of all pipelines with boto Args: conn(DataPipelineConnection): boto connection to datapipeline Returns: pipelines(list): list of pipelines fetched with boto """ if conn is None: conn = get_datapipeline_connection() return get_list_from_boto( conn.list_pipelines, 'pipelineIdList', ) def date_string(date): """Normalizes a date string to YYYY-mm-dd HH:MM:SS """ if date is None: return 'NULL' return str(dateutil.parser.parse(date)) def list_formatted_instance_details(pipeline): """List of instance rows formatted to match """ etl_runs = pipeline.instance_details() entries = [] for etl_run_dt in sorted(etl_runs.keys()): # Look through instances for instance in sorted( etl_runs[etl_run_dt], key=lambda x: x.get(DP_ACTUAL_END_TIME, None)): entries.append( [ instance[DP_INSTANCE_ID_KEY], pipeline.id, date_string(etl_run_dt), date_string(instance.get(DP_ACTUAL_END_TIME)), instance[DP_INSTANCE_STATUS_KEY], instance.get(DP_ATTEMPT_COUNT_KEY, 'NULL'), ] ) return entries
from functools import wraps

from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
import json

from .models import *


def unauthenticated_user(view_func):
    """Decorator: redirect already-authenticated users away from the view
    (e.g. keep logged-in users off the login/register pages)."""
    @wraps(view_func)
    def wrapper_func(request, *args, **kwargs):
        if request.user.is_authenticated:
            return HttpResponseRedirect("./")
        return view_func(request, *args, **kwargs)
    return wrapper_func


def allowed_users(allowed_roles=[]):
    """Decorator factory: allow the view only for users whose FIRST group
    name is in ``allowed_roles``; everyone else gets a plain denial page.

    ``allowed_roles`` is only read, never mutated, so the mutable default
    is harmless here (kept for interface compatibility).
    """
    def decorator(view_func):
        @wraps(view_func)
        def wrapper_func(request, *args, **kwargs):
            group = None
            if request.user.groups.exists():
                # NOTE: only the first group is considered, as before.
                group = request.user.groups.all()[0].name
            if group in allowed_roles:
                return view_func(request, *args, **kwargs)
            return HttpResponse('You are not authorized to view this page')
        return wrapper_func
    return decorator


def cookieCart(request):
    """Build a cart summary for a non-logged-in user from the ``cart`` cookie.

    Returns:
        dict with ``cartItems`` (total quantity), ``order`` (totals plus a
        ``shipping`` flag) and ``items`` (per-product detail dicts).
    """
    try:
        cart = json.loads(request.COOKIES['cart'])
    except (KeyError, ValueError):
        # No cookie yet, or a corrupted/unparsable one: start with an
        # empty cart (was a bare ``except:``, narrowed to the two real
        # failure modes: missing key and bad JSON).
        cart = {}
    print('CART:', cart)

    items = []
    order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}
    cartItems = order['get_cart_items']

    for i in cart:
        # Products may have been removed since the cookie was written;
        # skip any entry we can no longer resolve instead of crashing.
        try:
            product = Product.objects.get(id=i)
            quantity = cart[i]['quantity']
            total = product.price * quantity

            # BUG FIX: count the item only after the product lookup
            # succeeded, so cartItems stays consistent with
            # order['get_cart_items'] when a product no longer exists.
            cartItems += quantity
            order['get_cart_total'] += total
            order['get_cart_items'] += quantity

            item = {
                'id': product.id,
                'product': {
                    'id': product.id,
                    'name': product.name,
                    'price': product.price,
                    'imageURL': product.imageURL,
                },
                'quantity': quantity,
                'digital': product.digital,
                'get_total': total,
            }
            items.append(item)

            if product.digital == False:
                order['shipping'] = True
        except Exception:
            # Deliberate best-effort: one malformed entry must not break
            # the whole cart page.
            pass

    return {'cartItems': cartItems, 'order': order, 'items': items}


def cartData(request):
    """Return the cart summary for any visitor.

    Authenticated users get their open (``complete=False``) Order from the
    database; anonymous users get the cookie-based cart from cookieCart().
    """
    if request.user.is_authenticated:
        customer = request.user.customer
        order, created = Order.objects.get_or_create(customer=customer, complete=False)
        items = order.orderitem_set.all()
        cartItems = order.get_cart_items
    else:
        cookieData = cookieCart(request)
        cartItems = cookieData['cartItems']
        order = cookieData['order']
        items = cookieData['items']

    return {'cartItems': cartItems, 'order': order, 'items': items}


def guestOrder(request, data):
    """Create (or reuse) a Customer keyed by e-mail and a fresh incomplete
    Order populated from the cookie cart.

    Args:
        data: checkout payload; ``data['form']`` must carry name and email.

    Returns:
        (customer, order) tuple.
    """
    name = data['form']['name']
    email = data['form']['email']

    cookieData = cookieCart(request)
    items = cookieData['items']

    # Re-use an existing customer with the same e-mail; always refresh name.
    customer, created = Customer.objects.get_or_create(
        email=email,
    )
    customer.name = name
    customer.save()

    order = Order.objects.create(
        customer=customer,
        complete=False,
    )

    for item in items:
        product = Product.objects.get(id=item['id'])
        orderItem = OrderItem.objects.create(
            product=product,
            order=order,
            quantity=item['quantity'],
        )
    return customer, order
from .base import Transform, TransformChain from .homogeneous import * from .thinplatesplines import ThinPlateSplines from .piecewiseaffine import PiecewiseAffine from .rbf import R2LogR2RBF, R2LogRRBF from .groupalign.procrustes import GeneralizedProcrustesAnalysis
# encryption.py # Class to utilize the cryptography library and produce encrypted values # Authors: Akansh Divker import string import random from Crypto.Hash import SHA256 class Encryption: def __init__(self): letters = string.ascii_lowercase key = ''.join(random.choice(letters) for i in range(16)) hash_object = SHA256.new(data=key.encode('utf-8')) self.key = hash_object.hexdigest() def encrypt(self, plaintext: str) -> str: ciphertext = "" for i in range(len(plaintext)): current = plaintext[i] current_key = self.key[i%len(self.key)] ciphertext += chr(ord(current) ^ ord(current_key)) return ciphertext def to_byte_string(self, convert) -> str: return ''.join(['\\'+hex(b)[1:] for b in convert])
# Generated by Django 3.2.3 on 2021-07-01 16:50 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('order', '0014_auto_20210628_0012'), ] operations = [ migrations.AlterField( model_name='ordernumbermodel', name='order_date', field=models.IntegerField(default=0, verbose_name='下单时间'), ), ]
# Author: Kelvin Lai <kelvin@firststreet.org> # Copyright: This module is owned by First Street Foundation # Standard Imports import logging # Internal Imports from firststreet.api import csv_format from firststreet.api.api import Api from firststreet.models.environmental import EnvironmentalPrecipitation class Environmental(Api): """This class receives a list of search_items and handles the creation of a environmental product from the request. Methods: get_precipitation: Retrieves a list of Environmental Precipitation for the given list of IDs """ def get_precipitation(self, search_item, csv=False, output_dir=None, extra_param=None): """Retrieves environmental precipitation product data from the First Street Foundation API given a list of search_items and returns a list of Environmental Precipitation objects. Args: search_item (list/file): A First Street Foundation IDs, lat/lng pair, address, or a file of First Street Foundation IDs csv (bool): To output extracted data to a csv or not output_dir (str): The output directory to save the generated csvs extra_param (str): Extra parameter to be added to the url Returns: A list of Adaptation Detail """ # Get data from api and create objects api_datas = self.call_api(search_item, "environmental", "precipitation", "county", extra_param=extra_param) product = [EnvironmentalPrecipitation(api_data) for api_data in api_datas] if csv: csv_format.to_csv(product, "environmental", "precipitation", "county", output_dir=output_dir) logging.info("Environmental Precipitation Data Ready.") return product
# Time:  O(m * n)
# Space: O(1)
#
# A matrix is Toeplitz if every diagonal from top-left to bottom-right has
# the same element. Given an M x N matrix, return True iff it is Toeplitz.
# e.g. [[1,2,3,4],[5,1,2,3],[9,5,1,2]] -> True, [[1,2],[2,2]] -> False.


class Solution(object):
    def isToeplitzMatrix(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: bool
        """
        # Every diagonal is constant iff each element equals its upper-left
        # neighbour, so one pairwise sweep over the interior suffices.
        for r in range(1, len(matrix)):
            row = matrix[r]
            above = matrix[r - 1]
            for c in range(1, len(row)):
                if row[c] != above[c - 1]:
                    return False
        return True
from .model import ClassAttention, ClassAttentionLayer, SelfAttentionLayer, CaiTBackbone, CaiTWithLinearClassifier from .config import CaiTConfig
from struct import pack, unpack

from .zkconst import *


def zkversion(self):
    """Query the time clock for its firmware version.

    Sends a CMD_VERSION packet over the device's UDP socket, updates
    ``self.session_id`` from the reply header, and returns the reply
    payload (everything after the 8-byte header). Returns ``False`` on
    any communication failure (best-effort, as before).
    """
    command = CMD_VERSION
    command_string = ''
    chksum = 0
    session_id = self.session_id
    # The reply id is the 4th unsigned short of the last received header.
    reply_id = unpack('HHHH', self.data_recv[:8])[3]

    buf = self.createHeader(command, chksum, session_id, reply_id,
                            command_string)
    self.zkclient.sendto(buf, self.address)

    try:
        self.data_recv, addr = self.zkclient.recvfrom(1024)
        self.session_id = unpack('HHHH', self.data_recv[:8])[2]
        return self.data_recv[8:]
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit. Narrowed to Exception while keeping
        # the best-effort "return False on failure" contract.
        return False
"""Deprecated path for GCS storage.""" # TODO: Remove after deprecation period. from grow.common import deprecated from grow.storage import google_storage as new_ref # pylint: disable=invalid-name CloudStorage = deprecated.MovedHelper( new_ref.CloudStorage, 'grow.pods.storage.google_storage.CloudStorage') CloudStorageLoader = deprecated.MovedHelper( new_ref.CloudStorageLoader, 'grow.pods.storage.google_storage.CloudStorageLoader')
# -*- coding:utf-8 -*-
# NOTE: Auto-generated Python module compiled by Mako from the Nikola base
# theme template comments_helper_facebook.tmpl. Do not edit by hand —
# regenerate from the template. The trailing docstring carries metadata
# Mako uses to map generated lines back to template lines.
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1443802885.4184651
_enable_loop = True
_template_filename = '/usr/local/lib/python3.4/dist-packages/nikola/data/themes/base/templates/comments_helper_facebook.tmpl'
_template_uri = 'comments_helper_facebook.tmpl'
_source_encoding = 'utf-8'
_exports = ['comment_link_script', 'comment_form', 'comment_link']


# Renders the template body (only the whitespace between the def blocks).
def render_body(context,**pageargs):
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        __M_writer = context.writer()
        __M_writer('\n\n')
        __M_writer('\n\n')
        __M_writer('\n')
        return ''
    finally:
        context.caller_stack._pop_frame()


# Emits the FB JS SDK bootstrap plus a script that rewrites
# .fb-comments-count placeholders into <fb:comments-count> tags.
def render_comment_link_script(context):
    __M_caller = context.caller_stack._push_frame()
    try:
        comment_system_id = context.get('comment_system_id', UNDEFINED)
        __M_writer = context.writer()
        __M_writer('\n<div id="fb-root"></div>\n<script>\n // thank lxml for this\n $(\'.fb-comments-count\').each(function(i, obj) {\n var url = obj.attributes[\'data-url\'].value;\n // change here if you dislike the default way of displaying\n // this\n obj.innerHTML = \'<fb:comments-count href="\' + url + \'"></fb:comments-count> comments\';\n });\n\n window.fbAsyncInit = function() {\n // init the FB JS SDK\n FB.init({\n appId : \'')
        __M_writer(str(comment_system_id))
        __M_writer('\',\n status : true,\n xfbml : true\n });\n\n };\n\n // Load the SDK asynchronously\n (function(d, s, id){\n var js, fjs = d.getElementsByTagName(s)[0];\n if (d.getElementById(id)) {return;}\n js = d.createElement(s); js.id = id;\n js.src = "//connect.facebook.net/en_US/all.js";\n fjs.parentNode.insertBefore(js, fjs);\n }(document, \'script\', \'facebook-jssdk\'));\n</script>\n')
        return ''
    finally:
        context.caller_stack._pop_frame()


# Emits the FB comments widget (fb-root div + SDK init + fb-comments div)
# for the page at *url*; *title* and *identifier* are unused by this theme.
def render_comment_form(context,url,title,identifier):
    __M_caller = context.caller_stack._push_frame()
    try:
        comment_system_id = context.get('comment_system_id', UNDEFINED)
        __M_writer = context.writer()
        __M_writer('\n<div id="fb-root"></div>\n<script>\n window.fbAsyncInit = function() {\n // init the FB JS SDK\n FB.init({\n appId : \'')
        __M_writer(str(comment_system_id))
        __M_writer('\',\n status : true,\n xfbml : true\n });\n\n };\n\n // Load the SDK asynchronously\n (function(d, s, id){\n var js, fjs = d.getElementsByTagName(s)[0];\n if (d.getElementById(id)) {return;}\n js = d.createElement(s); js.id = id;\n js.src = "//connect.facebook.net/en_US/all.js";\n fjs.parentNode.insertBefore(js, fjs);\n }(document, \'script\', \'facebook-jssdk\'));\n</script>\n\n<div class="fb-comments" data-href="')
        __M_writer(str(url))
        __M_writer('" data-width="470"></div>\n')
        return ''
    finally:
        context.caller_stack._pop_frame()


# Emits an fb-comments-count placeholder span for *link*, later picked up by
# the rewrite script from render_comment_link_script; *identifier* is unused.
def render_comment_link(context,link,identifier):
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_writer = context.writer()
        __M_writer('\n<span class="fb-comments-count" data-url="')
        __M_writer(str(link))
        __M_writer('">\n')
        return ''
    finally:
        context.caller_stack._pop_frame()


"""
__M_BEGIN_METADATA
{"uri": "comments_helper_facebook.tmpl", "source_encoding": "utf-8", "filename": "/usr/local/lib/python3.4/dist-packages/nikola/data/themes/base/templates/comments_helper_facebook.tmpl", "line_map": {"48": 8, "34": 32, "35": 46, "36": 46, "69": 63, "42": 2, "61": 28, "47": 2, "16": 0, "49": 8, "50": 25, "51": 25, "21": 26, "22": 30, "23": 62, "57": 28, "29": 32, "62": 29, "63": 29}}
__M_END_METADATA
"""
#!/usr/bin/python3
# UTF8
# Date: Thu 20 Jun 2019 13:00:45 CEST
# Author: Nicolas Flandrois

from engine import Engine


def main():
    """Program entry point: hand control to the Engine's menu loop."""
    Engine.menu()


if __name__ == '__main__':
    main()
import logging

from aiogram import types
from aiogram.dispatcher import Dispatcher
from aiogram.utils.executor import start_webhook

from config import (
    API_TOKEN,
    preliminary_command,
    retraction_command,
    secret,
    update_command,
)
from logconfig import logging_kwargs

from gracebot import GraceBot
from ngrok import get_ngrok_url, get_port

logging.basicConfig(**logging_kwargs)  # type: ignore

# Single bot/dispatcher pair shared by all handlers below. Each handler is a
# thin shim that delegates straight to the corresponding GraceBot method.
bot = GraceBot(token=API_TOKEN)
dp = Dispatcher(bot)


@dp.message_handler(commands=["start", "help"])
async def send_welcome(message: types.Message):
    # /start and /help share the same welcome text.
    await bot.send_welcome_message(message)


@dp.message_handler(commands=["latest"])
async def send_latest_event(message: types.Message):
    await bot.send_latest(message)


@dp.message_handler(commands=["event"])
async def send_event(message: types.Message):
    # Shows the inline keyboard from which a specific event can be picked.
    await bot.send_event_selector(message)


# Callback data the event-selector keyboard can produce: one key per known
# event plus the two pagination buttons.
event_selection_keys = bot.event_keys + ["next", "previous"]


@dp.callback_query_handler(lambda cb: cb.data in event_selection_keys)
async def inline_kb_answer_callback_handler(query: types.CallbackQuery):
    await bot.event_selector_callback_handler(query)


@dp.message_handler(commands=["stats"])
async def send_o3_stats(message: types.Message):
    await bot.send_o3_stats(message)


@dp.message_handler(commands=["status"])
async def send_detector_status(message: types.Message):
    await bot.send_detector_status(message)


@dp.message_handler(commands=["subscribe"])
async def add_subscriber(message: types.Message):
    await bot.add_subscriber(message)


@dp.message_handler(commands=["unsubscribe"])
async def remove_subscriber(message: types.Message):
    await bot.remove_subscriber(message)


# The alert handlers below are registered under command names taken from
# config; @dp.async_task lets the (potentially slow) broadcast run without
# blocking the webhook response.
@dp.message_handler(commands=[preliminary_command])
@dp.async_task
async def send_preliminary(message: types.Message):
    await bot.send_preliminary(message)


@dp.message_handler(commands=[update_command])
@dp.async_task
async def send_update(message: types.Message):
    await bot.send_update(message)


@dp.message_handler(commands=[retraction_command])
@dp.async_task
async def send_retraction(message: types.Message):
    await bot.send_retraction(message)


async def on_startup(dp):
    # Register the ngrok-tunnelled public URL with Telegram; ``secret`` keeps
    # the webhook path unguessable.
    webhook_url = f"{get_ngrok_url()}/{secret}"
    await bot.set_webhook(webhook_url)


if __name__ == "__main__":
    webapp_host = "localhost"
    webapp_port = get_port()

    start_webhook(
        dispatcher=dp,
        webhook_path=f"/{secret}",
        on_startup=on_startup,
        skip_updates=True,
        host=webapp_host,
        port=webapp_port,
    )
# coding=utf-8
from __future__ import absolute_import, print_function

import torch
from torch.backends import cudnn

from evaluations import extract_features
import models
from data import dataset
from utils.serialization import load_checkpoint

cudnn.benchmark = True


def Model2Feature(data, net, checkpoint, dim=512, width=224, root=None,
                  nThreads=16, batch_size=100, pool_feature=False, model=None,
                  org_feature=False, args=None):
    """Load a model from *checkpoint* (unless *model* is given) and extract
    (gallery, query) features for the test split of dataset *data*.

    Args:
        data (str): dataset name; also selects the gallery/query layout.
        net (str): architecture name passed to models.create.
        checkpoint (dict): already-loaded checkpoint containing 'state_dict'.
        dim (int): embedding dimension for the created model.
        width (int): input image width for the test transform.
        root (str): dataset root directory.
        nThreads (int): DataLoader worker count.
        batch_size (int): extraction batch size.
        pool_feature (bool): forwarded to extract_features.
        model: optional pre-built model; when given, checkpoint loading is
            skipped entirely.
        org_feature (bool): if True, L2-normalize the raw features here.
        args: extra dataset options forwarded to dataset.Dataset.

    Returns:
        (gallery_feature, gallery_labels, query_feature, query_labels).
        For datasets without a separate query split, the gallery and query
        pairs alias the same tensors.
    """
    dataset_name = data
    if model is None:
        # NOTE(review): reconstructed indentation — model creation, weight
        # loading and the DataParallel wrap are all assumed to apply only
        # when no ready model was supplied; confirm against callers.
        model = models.create(net, dim=dim, pretrained=False)
        resume = checkpoint
        model.load_state_dict(resume['state_dict'], strict=False)
        model = torch.nn.DataParallel(model).cuda()

    data = dataset.Dataset(data, width=width, root=root, mode="test",
                           self_supervision_rot=0, args=args)
    if dataset_name in ['shop', 'jd_test', 'cifar']:
        # These datasets have distinct gallery and query splits.
        gallery_loader = torch.utils.data.DataLoader(
            data.gallery, batch_size=batch_size,
            shuffle=False, drop_last=False, pin_memory=True,
            num_workers=nThreads)

        query_loader = torch.utils.data.DataLoader(
            data.query, batch_size=batch_size,
            shuffle=False, drop_last=False,
            pin_memory=True, num_workers=nThreads)

        gallery_feature, gallery_labels = extract_features(
            model, gallery_loader, print_freq=1e5, metric=None,
            pool_feature=pool_feature, org_feature=org_feature)
        query_feature, query_labels = extract_features(
            model, query_loader, print_freq=1e5, metric=None,
            pool_feature=pool_feature, org_feature=org_feature)
        if org_feature:
            # L2-normalize each embedding row (query, then gallery).
            norm = query_feature.norm(dim=1, p=2, keepdim=True)
            query_feature = query_feature.div(norm.expand_as(query_feature))
            print("feature normalized 1")
            norm = gallery_feature.norm(dim=1, p=2, keepdim=True)
            gallery_feature = gallery_feature.div(norm.expand_as(gallery_feature))
            print("feature normalized 2")
    else:
        # Single test split: gallery doubles as query.
        data_loader = torch.utils.data.DataLoader(
            data.gallery, batch_size=batch_size,
            shuffle=False, drop_last=False, pin_memory=True,
            num_workers=nThreads)
        features, labels = extract_features(
            model, data_loader, print_freq=1e5, metric=None,
            pool_feature=pool_feature, org_feature=org_feature)
        if org_feature:
            norm = features.norm(dim=1, p=2, keepdim=True)
            features = features.div(norm.expand_as(features))
            print("feature normalized")
        # Chained assignment: both pairs alias the same tensors.
        gallery_feature, gallery_labels = query_feature, query_labels = features, labels
    return gallery_feature, gallery_labels, query_feature, query_labels
#!/usr/bin/env python
# vi: sw=4 ts=4:
"""
    Mnemonic:   vfd_pre_start.py
    Abstract:   This script calls the 'dpdk_nic_bind' script to bind PF's and VF's to vfio-pci
    Date:       April 2016
    Author:     Dhanunjaya Naidu Ravada (dr3662@att.com)
    Mod:        2016 7 Apr - Created script
                2016 8 Apr - fix to index out of bound error
                2016 22 Apr - remove unloading ixgbevf driver
                2016 30 May - wait for vf's to create
"""

import subprocess
import json
import sys
import logging
from logging.handlers import RotatingFileHandler
import os
import time

VFD_CONFIG = '/etc/vfd/vfd.cfg'
SYS_DIR = "/sys/devices"
LOG_DIR = '/var/log/vfd'

# global pciids list
pciids = []

# global group pciids list
group_pciids = []

# To catch index error (set by driver_attach when lspci output is too short)
index = 0

# logging
log = logging.getLogger('vfd_pre_start')


def setup_logging(logfile):
    """Attach a rotating file handler (200 KB x 20 backups) to the module logger."""
    handler = RotatingFileHandler(os.path.join(LOG_DIR, logfile), maxBytes=200000, backupCount=20)
    log_formatter = logging.Formatter('%(asctime)s %(process)s %(levelname)s %(name)s [-] %(message)s')
    handler.setFormatter(log_formatter)
    log.setLevel(logging.INFO)
    log.addHandler(handler)


def is_vfio_pci_loaded():
    """Return True when the vfio_pci kernel module is already loaded."""
    try:
        subprocess.check_call('lsmod | grep vfio_pci >/dev/null', shell=True)
        return True
    except subprocess.CalledProcessError:
        return False


# load vfio-pci module
def load_vfio_pci_driver():
    """modprobe vfio-pci; return True on success, False on failure."""
    try:
        subprocess.check_call('modprobe vfio-pci', shell=True)
        return True
    except subprocess.CalledProcessError:
        # BUG FIX: was `return Flase` (NameError) — a modprobe failure would
        # have crashed the script instead of being reported to the caller.
        return False


# get pciids from vfd.cfg
def get_pciids():
    """Parse VFD_CONFIG as JSON and return its 'pciids' list; exit(1) on bad JSON."""
    with open(VFD_CONFIG) as data_file:
        try:
            data = json.load(data_file)
        except ValueError:
            log.error("%s is not a valid json", VFD_CONFIG)
            sys.exit(1)
    return data['pciids']


# unbind pciid
def unbind_pfs(dev_id):
    """Force-unbind *dev_id* from its current driver via dpdk_nic_bind."""
    unbind_cmd = 'dpdk_nic_bind --force -u %s' % dev_id
    log.info(unbind_cmd)
    try:
        msg = subprocess.check_output(unbind_cmd, shell=True)
        # dpdk_nic_bind refuses to unbind an active routing interface; it
        # reports this on stdout rather than via the exit code.
        if "Routing" in msg:
            log.error(msg)
            return False
        return True
    except subprocess.CalledProcessError:
        return False


def get_vfids(dev_id):
    """Return the VF pci ids (virtfn symlink targets) under PF *dev_id*."""
    cmd = 'find %s -name %s -type d | while read d; do echo "$d"; ls -l $d | grep virtfn| sed \'s!.*/!!\'; done' % (SYS_DIR, dev_id)
    vfids = subprocess.check_output(cmd, shell=True).split('\n')[1:]
    log.info("[%s]: %s", dev_id, vfids)
    return filter(None, vfids)


# bind pf's and vf's to vfio-pci
def bind_pf_vfs(dev_id):
    """Force-bind *dev_id* to the vfio-pci driver via dpdk_nic_bind."""
    bind_cmd = 'dpdk_nic_bind --force -b vfio-pci %s' % dev_id
    log.info(bind_cmd)
    try:
        subprocess.check_call(bind_cmd, shell=True)
        return True
    except subprocess.CalledProcessError:
        return False


# check whether vfio-pci driver is attached to pf or vf
def driver_attach(dev_id):
    """Return True when vfio-pci is the driver in use for *dev_id*.

    Side effect: sets the global ``index`` to 1 when lspci prints no driver
    line at all (device bound to nothing), which main() uses to skip the
    unbind step.
    """
    global index
    index = 0
    cmd = 'lspci -k -s %s' % dev_id
    try:
        driver_name = subprocess.check_output(cmd, shell=True).splitlines()[2].split(':')[1].lstrip()
        if driver_name == 'vfio-pci':
            return True
        return False
    except IndexError:
        index = 1
        return False


# get the pci cards in the group which must be attached to the vfio-pci driver
def get_pciids_group(dev_id):
    """Append every device sharing *dev_id*'s IOMMU group to group_pciids."""
    global group_pciids
    group_num = None
    cmd = "find /sys/kernel/iommu_groups -type l|grep %s | awk -F/ '{print $(NF-2)}'" % dev_id
    group_num = subprocess.check_output(cmd, shell=True)
    if group_num is not None:
        cmd = "find /sys/kernel/iommu_groups -type l|grep groups.%s" % group_num
        list_pciids = subprocess.check_output(cmd, shell=True)
        for pciid in list_pciids.splitlines():
            group_pciids.append(pciid.split('/')[-1])


# get the vendor details
def check_vendor():
    """Return the subset of pciids whose lspci vendor is not Intel."""
    global pciids
    not_ixgbe_vendor = []
    for pciid in pciids:
        cmd = "lspci -vm -s %s" % pciid
        try:
            vendor_name = subprocess.check_output(cmd, shell=True).splitlines()[2].split(':')[1].lstrip()
            if vendor_name == 'Intel Corporation':
                continue
            else:
                not_ixgbe_vendor.append(pciid)
        except IndexError:
            log.error("Not able to find valid vendor %s", pciid)
            sys.exit(1)
    return not_ixgbe_vendor


def get_configured_vfs(pciids):
    """Return the total number of VFs currently created under the given PFs."""
    vfd_count = 0
    for pciid in pciids:
        vfd_count = vfd_count + len(get_vfids(pciid))
    return vfd_count


def main():
    """Expand the configured pciids by IOMMU group, sanity-check vendors and
    VF counts, then bind every PF and VF to vfio-pci."""
    global pciids
    global group_pciids
    global index

    # Entries in vfd.cfg may be plain strings or {'id': ...} dicts.
    for value in get_pciids():
        if 'id' in value:
            pciids.append(value['id'])
        else:
            pciids.append(value)

    for pciid in pciids:
        get_pciids_group(pciid)

    pciids = list(set(pciids) | set(group_pciids))
    log.info("pciids: %s", pciids)

    vfs_count = get_configured_vfs(pciids)
    if vfs_count == 0:
        log.error("It seems VF's are not Created, check 'dpdk_nic_bind --st'")
        sys.exit(1)

    not_ixgbe_vendor = check_vendor()
    if len(not_ixgbe_vendor) > 0:
        log.error("VFD wont handle for this vendors: %s", not_ixgbe_vendor)
        sys.exit(1)

    if not is_vfio_pci_loaded():
        if load_vfio_pci_driver():
            log.info("Successfully loaded vfio-pci driver")
        else:
            log.error("unable to load vfio-pci driver")
            sys.exit(1)
    else:
        log.info("Already loaded vfio-pci driver")

    # First pass: detach every PF that is not yet on vfio-pci.
    for pciid in pciids:
        if not driver_attach(pciid):
            if index == 0:
                if not unbind_pfs(pciid):
                    log.error("unable to unbind %s PF", pciid)
                    sys.exit(1)
                else:
                    log.info("Successfully unbind %s", pciid)
        else:
            log.info("Already %s bind to vfio-pci driver", pciid)

    # Second pass: bind every PF, then its VFs, to vfio-pci.
    for pciid in pciids:
        if not bind_pf_vfs(pciid):
            log.error("unable to bind %s with vfio-pci", pciid)
            sys.exit(1)
        else:
            log.info("Successfully bind %s", pciid)
        for vfid in get_vfids(pciid):
            if not driver_attach(vfid):
                if index == 0:
                    # VF bound to another driver: unbind, then bind.
                    if not unbind_pfs(vfid):
                        log.error("unable to unbind %s VF", vfid)
                        sys.exit(1)
                    else:
                        log.info("Successfully unbind %s", vfid)
                    # typo fix in log text: was "unbale to bind ..."
                    if not bind_pf_vfs(vfid):
                        log.error("unable to bind %s with vfio-pci", vfid)
                        sys.exit(1)
                    else:
                        log.info("Successfully bind %s", vfid)
                if index == 1:
                    # VF bound to nothing (lspci had no driver line): bind directly.
                    if not bind_pf_vfs(vfid):
                        log.error("unable to bind %s with vfio-pci", vfid)
                        sys.exit(1)
                    else:
                        log.info("Successfully bind %s", vfid)
            else:
                log.info("Already %s bind to vfio-pci driver", vfid)


if __name__ == '__main__':
    setup_logging('vfd_upstart.log')
    main()
def first_repeated(c):
    """Return the first character of *c* that occurs a second time
    (i.e. the one whose repeat appears earliest), or the string
    "No repeated Element" when every character is unique.

    Uses a set for O(1) membership instead of the original dict named
    ``hash`` (which shadowed the builtin) and index-based loop.
    """
    seen = set()
    for ch in c:
        if ch in seen:
            return ch
        seen.add(ch)
    return "No repeated Element"


if __name__ == "__main__":
    # BUG FIX: guarded so importing this module no longer blocks on input().
    a = input("Enter : ")
    print(first_repeated(a))
# Generated by Django 3.1.7 on 2021-05-03 04:08 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations = [ migrations.CreateModel( name='Department', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('major_name', models.CharField(max_length=50)), ], ), migrations.CreateModel( name='NewsletterSection', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('section_name', models.CharField(max_length=50)), ], ), migrations.RenameField( model_name='user', old_name='class_of', new_name='class_year', ), migrations.RemoveField( model_name='user', name='user_type', ), migrations.AddField( model_name='user', name='relation_to_college', field=models.CharField(choices=[('Student', 'Student'), ('Alumni', 'Alumni'), ('Faculty', 'Faculty'), ('Staff', 'Staff'), ('Par_Fam', 'Parent/Family'), ('CHC', 'College Hill Community'), ('Other', 'Other')], default='Student', max_length=20), preserve_default=False, ), migrations.AddField( model_name='user', name='department', field=models.ManyToManyField(to='users.Department'), ), migrations.AddField( model_name='user', name='interest_newsletter', field=models.ManyToManyField(to='users.NewsletterSection'), ), ]
def add(x, y):
    """Return the sum of x and y."""
    return x + y


def diff(x, y):
    """Return x minus y."""
    return x - y


def multiple(x, y):
    """Return the product of x and y."""
    return x * y


def get_x(x, y):
    """Return the first argument unchanged."""
    return x


def get_y(x, y):
    """Return the second argument unchanged."""
    return y


def x_is_greater_than_y(x, y):
    """Return 1.0 when x > y, else 0.0 (float-valued comparison)."""
    return float(x > y)
#!/bin/python3
# author: Jan Hybs

import argparse
import pathlib

import maya

from cihpc.common.utils.vcs import HistoryBrowser

# Commits older than this are ignored unless --min-age overrides it.
default_min_age = maya.when('6 months ago')

parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', help='URL of the repo',
                    default='https://github.com/flow123d/flow123d.git')
parser.add_argument('-b', '--branch', help='Specify branch', default=None)
parser.add_argument('--min-age', help='Drop all commit older than this date',
                    type=maya.when, default=default_min_age)
# typo fix in help text: was "Number od commits to process"
parser.add_argument('-n', '--limit', help='Number of commits to process',
                    type=int, default=10)
# BUG FIX: a positional argument only honours its default with nargs='?';
# without it argparse made 'repo' required and silently ignored the default.
parser.add_argument('repo', help='Path to the repo', type=pathlib.Path,
                    nargs='?', default=pathlib.Path('.'))

args = parser.parse_args()

repo: pathlib.Path = args.repo
url: str = args.url
limit: int = args.limit
branch: str = args.branch
min_age: maya.MayaDT = args.min_age

history_browser = HistoryBrowser(url, repo)
history_browser.commit_surroundings('68afef4')
# for hist in history_browser.git_history(None, limit=limit, min_age=min_age):
#     print(hist)
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.lang import Builder
from kivy.uix.textinput import TextInput
from kivy.uix.stacklayout import StackLayout
from kivy.uix.button import Button

import helper as hlp

# NOTE: this module uses Python 2 syntax (print statement, unicode()).
# Shared REST helper used by the UI callbacks below.
talk = hlp.Talk()

# Kivy layout: a small REST console with get/post/put/login buttons, a path
# field (met), an editor box (input_) and a server-output box (output_).
Builder.load_string("""
<ScreenUI>:
    orientation: 'lr-tb'
    Button:
        text: 'get'
        on_press: root.get_touch()
        size: 200, 50
        size_hint: None, None
    Button:
        id: post_
        text: 'post'
        on_press: root.post_touch()
        size_hint: None, None
        size: 200, 50
    Button:
        id: put_
        text: 'put'
        size_hint: None, None
        size: 200, 50
    Button:
        id: login
        text: 'login'
        on_press: root.login_touch()
        size_hint: None, None
        size: 200, 50
    TextInput:
        text: '/serwisy/mapi/pacjent'
        id: met
        size_hint: None, None
        size: 800, 30
    Label:
        text: 'editor'
        size_hint: None, None
        size: 800, 30
    TextInput:
        id: input_
        text: 'put something here'
        size_hint: None, None
        size: 800, 200
    Label:
        text: 'server output'
        size_hint: None, None
        size: 800, 30
    TextInput:
        id: output_
        text: 'put something here'
        size_hint: None, None
        size: 800, 270
""")


class ScreenUI(StackLayout):
    # Root widget; behaviour is wired up via the kv string above.

    def post_touch(self):
        # NOTE(review): only dumps the editor contents to stdout; no POST
        # request is actually sent yet — confirm whether this is intentional.
        print self.ids.input_.text

    def get_touch(self):
        # GET the path typed into the 'met' field and show the response.
        m = self.ids.met.text
        r=unicode(talk.get_(m))
        self.ids.output_.text=r

    def login_touch(self):
        # Authenticate via the helper and show the server's reply.
        r=talk.login_()
        self.ids.output_.text=r


class WidgetApp(App):
    def build(self):
        # Return the root widget for the app window.
        app = ScreenUI()
        return app


if __name__ == '__main__':
    WidgetApp().run()
# _*_coding:utf-8_*_
import pika

from config import config


def mq_client():
    """Open a blocking RabbitMQ connection and return a channel with the
    'balance' queue declared.

    Connection parameters (user, passwd, ip, port) are read from the
    [rabbit_mq] section of ../conf/mq.ini.
    """
    cfg = config("../conf/mq.ini")
    auth = pika.PlainCredentials(
        cfg.getOption("rabbit_mq", "user"),
        cfg.getOption("rabbit_mq", "passwd"),
    )
    params = pika.ConnectionParameters(
        cfg.getOption("rabbit_mq", "ip"),
        cfg.getOption("rabbit_mq", "port"),
        '/',
        auth,
    )
    channel = pika.BlockingConnection(params).channel()
    channel.queue_declare(queue='balance')
    return channel
import copy
import inspect

from urlparse import urljoin

from jfr_playoff.dto import Match, Team
from jfr_playoff.logger import PlayoffLogger


class ResultInfoClient(object):
    """Base class for data-source clients; subclasses override capability,
    priority and the per-method exception whitelist."""

    def __init__(self, settings, database=None):
        self.settings = settings
        self.database = database

    @property
    def priority(self):
        # Higher priority clients are tried first (see _fill_client_list).
        return 0

    def is_capable(self):
        # Whether this client can serve the given settings at all.
        return False

    def get_exceptions(self, method):
        # Expected to return a tuple of exception types that `method` may
        # legitimately raise (treated as soft failures by call_client).
        # NOTE(review): this base implementation returns None, which would
        # break `call_client`'s `+ (NotImplementedError,)` — presumably every
        # concrete client overrides it; confirm.
        pass


class ResultInfo(object):
    """Dispatches data queries over a prioritized list of clients discovered
    dynamically from the submodule named by `submodule_path`."""

    def __init__(self, *args):
        self.clients = self._fill_client_list(*args)

    @property
    def submodule_path(self):
        # Subclasses name the package whose CLIENTS list is scanned.
        raise NotImplementedError()

    @property
    def _client_classes(self):
        # Import the submodule package and yield every class defined directly
        # in each module listed in its CLIENTS attribute.
        module = __import__(self.submodule_path, fromlist=[''])
        for submodule_path in module.CLIENTS:
            submodule = __import__(submodule_path, fromlist=[''])
            for member in inspect.getmembers(submodule, inspect.isclass):
                # Skip classes merely imported into the module.
                if member[1].__module__ == submodule_path:
                    yield member[1]

    def _fill_client_list(self, *args):
        # Instantiate all clients, keep the capable ones, highest priority
        # first.
        all_clients = [c(*args) for c in self._client_classes]
        clients = [c for c in all_clients if c.is_capable()]
        return sorted(clients, key=lambda c: c.priority, reverse=True)

    def call_client(self, method, default, *args):
        """Try `method` on each client in priority order; return the first
        result. Whitelisted exceptions (plus NotImplementedError) fall
        through to the next client; anything else propagates. Returns
        `default` when every client fails."""
        PlayoffLogger.get('resultinfo').info(
            'calling %s on result info clients', method)
        for client in self.clients:
            try:
                ret = getattr(client, method)(*args)
                PlayoffLogger.get('resultinfo').info(
                    '%s.%s method returned %s',
                    client.__class__.__name__, method, ret)
                return ret
            except Exception as e:
                if type(e) \
                   in client.get_exceptions(method) + (NotImplementedError,):
                    PlayoffLogger.get('resultinfo').warning(
                        '%s.%s method raised %s(%s)',
                        client.__class__.__name__, method,
                        type(e).__name__, str(e))
                else:
                    raise
        PlayoffLogger.get('resultinfo').info(
            '%s method returning default: %s', method, default)
        return default


class TournamentInfo(ResultInfo):
    """Tournament-level queries (results, completion, links)."""

    def __init__(self, settings, database):
        ResultInfo.__init__(self, settings, database)
        self.final_positions = settings.get('final_positions', [])

    @property
    def submodule_path(self):
        return 'jfr_playoff.data.tournament'

    def get_tournament_results(self):
        """Return the team list; once the tournament is finished, stamp the
        configured final positions into column 4 of each team row."""
        teams = self.call_client('get_tournament_results', [])
        if self.is_finished():
            PlayoffLogger.get('tournamentinfo').info(
                'setting final positions from tournament results: %s',
                self.final_positions)
            for position in self.final_positions:
                if len(teams) >= position:
                    # Pad the row to at least 4 columns before writing the
                    # position into index 3.
                    teams[position-1] = (teams[position-1] + [None] * 4)[0:4]
                    teams[position-1][3] = position
        return teams

    def is_finished(self):
        # Defaults to True when no client can answer.
        return self.call_client('is_finished', True)

    def get_results_link(self, suffix='leaderb.html'):
        return self.call_client('get_results_link', None, suffix)


class MatchInfo(ResultInfo):
    """Single-match state: teams, scores, links and (possible) outcome.

    Class attribute `matches` is a registry of Match DTOs keyed by match id,
    used to resolve winner/loser references between matches.
    """

    matches = {}

    def __init__(self, match_config, teams, database, aliases=None,
                 starting_positions_certain=True):
        ResultInfo.__init__(self, match_config, database)
        self.config = match_config
        self.teams = teams
        self.database = database
        # Invert the {team: [aliases]} mapping into {alias: team}.
        self.aliases = {}
        if aliases:
            for team, team_aliases in aliases.iteritems():
                for alias in team_aliases:
                    self.aliases[alias] = team
        self._starting_positions_certain = starting_positions_certain
        self.info = Match()
        self._init_info()
        self._fetch_match_link()

    @property
    def submodule_path(self):
        return 'jfr_playoff.data.match'

    def _init_info(self):
        # Populate the Match DTO skeleton from config and register it in the
        # class-level registry so later matches can reference this one.
        self.info.id = self.config['id']
        MatchInfo.matches[self.info.id] = self.info
        self.info.running = 0
        self.info.winner_matches = []
        self.info.loser_matches = []
        # Collect the ids of matches whose winner/loser feeds either side.
        for i in range(0, 2):
            if 'winner' in self.config['teams'][i]:
                self.info.winner_matches += self.config['teams'][i]['winner']
            if 'loser' in self.config['teams'][i]:
                self.info.loser_matches += self.config['teams'][i]['loser']
        self.info.winner_matches = list(set(self.info.winner_matches))
        self.info.loser_matches = list(set(self.info.loser_matches))
        self.info.winner_place = self.config.get('winner', [])
        self.info.loser_place = self.config.get('loser', [])
        self.info.teams = []

    def _fetch_match_link(self):
        # Ask the clients for the match URL; leave info.link untouched if
        # none of them knows it.
        link = self.call_client('get_match_link', None)
        if link is not None:
            self.info.link = link
        else:
            PlayoffLogger.get('matchinfo').info(
                'match #%d link empty', self.info.id)

    def _get_predefined_scores(self):
        """Read scores hard-coded in config ('score' key).

        Returns (scores_fetched, teams_fetched, teams): a dict form also
        fixes team names (keys are team numbers or literal names); a plain
        list form only sets scores.
        """
        teams = [Team(), Team()]
        scores_fetched = False
        teams_fetched = False
        if 'score' in self.config:
            i = 0
            for score in self.config['score']:
                if isinstance(self.config['score'], dict):
                    teams[i].score = self.config['score'][score]
                    try:
                        # A numeric key is a 1-based index into the overall
                        # team list; otherwise the key is the team name.
                        team_no = int(score)
                        teams[i].name = [self.teams[team_no-1][0]]
                    except ValueError:
                        teams[i].name = [score]
                    teams_fetched = True
                else:
                    teams[i].score = score
                i += 1
                if i == 2:
                    break
            scores_fetched = True
            PlayoffLogger.get('matchinfo').info(
                'pre-defined scores for match #%d: %s', self.info.id, teams)
        return scores_fetched, teams_fetched, teams

    def _get_config_teams(self, teams):
        """Fill team names (and predicted names) for both sides from config:
        a literal name, a list of names, or references to other matches'
        winners/losers and/or starting places."""
        for i in range(0, 2):
            match_teams = []
            possible_teams = []
            if isinstance(self.config['teams'][i], basestring):
                match_teams = [self.config['teams'][i]]
            elif isinstance(self.config['teams'][i], list):
                match_teams = self.config['teams'][i]
            else:
                # Dict form: resolve cross-match references via the registry.
                if 'winner' in self.config['teams'][i]:
                    match_teams += [
                        MatchInfo.matches[winner_match].winner
                        for winner_match in self.config['teams'][i]['winner']]
                    possible_teams += [
                        MatchInfo.matches[winner_match].possible_winner
                        for winner_match in self.config['teams'][i]['winner']]
                if 'loser' in self.config['teams'][i]:
                    match_teams += [
                        MatchInfo.matches[loser_match].loser
                        for loser_match in self.config['teams'][i]['loser']]
                    possible_teams += [
                        MatchInfo.matches[loser_match].possible_loser
                        for loser_match in self.config['teams'][i]['loser']]
                if 'place' in self.config['teams'][i]:
                    placed_teams = [
                        self.teams[place-1][0]
                        for place in self.config['teams'][i]['place']]
                    # Placement-seeded teams are definite or only predicted,
                    # depending on whether starting positions are certain.
                    if self._starting_positions_certain:
                        match_teams += placed_teams
                        possible_teams = [None] * len(placed_teams)
                    else:
                        possible_teams += placed_teams
                        match_teams = [None] * len(placed_teams)
            teams[i].name = match_teams
            teams[i].possible_name = possible_teams
            teams[i].selected_team = self.config['selected_teams'][i] \
                if 'selected_teams' in self.config else -1
            # With an explicit selection there is exactly one known team;
            # otherwise count resolved (non-None) candidates.
            teams[i].known_teams = 1 if teams[i].selected_team >= 0 else len([
                team for team in match_teams if team is not None])
        PlayoffLogger.get('matchinfo').info(
            'config scores for match #%d: %s', self.info.id, teams)
        return teams

    def _resolve_team_aliases(self, teams):
        # Map every known alias to its canonical team name.
        return [
            self.aliases[team] if team in self.aliases else team
            for team in teams]

    def _fetch_teams_with_scores(self):
        """Populate info.teams: config-predefined scores first, then client
        fetch, then config fallback; finally resolve aliases and places."""
        (scores_fetched, teams_fetched, self.info.teams) = \
            self._get_predefined_scores()
        if scores_fetched:
            # Config may pin the running-segment counter; -1 means finished.
            self.info.running = int(self.config.get('running', -1))
        if not teams_fetched:
            # Deep copy so a failing client cannot corrupt our DTOs.
            teams = self.call_client(
                'fetch_teams', None, copy.deepcopy(self.info.teams))
            if teams is None:
                PlayoffLogger.get('matchinfo').warning(
                    'fetching teams for match #%d failed, reverting to config',
                    self.info.id)
                self.info.teams = self._get_config_teams(self.info.teams)
            else:
                self.info.teams = teams
        for team in range(0, len(self.info.teams)):
            if isinstance(self.config['teams'][team], dict):
                self.info.teams[team].place = self.config['teams'][team].get(
                    'place', self.info.teams[team].place)
            self.info.teams[team].name = self._resolve_team_aliases(
                self.info.teams[team].name)
            PlayoffLogger.get('matchinfo').info(
                'team list after resolving aliases: %s',
                self.info.teams[team].name)
            self.info.teams[team].possible_name = self._resolve_team_aliases(
                self.info.teams[team].possible_name)
            PlayoffLogger.get('matchinfo').info(
                'predicted team list after resolving aliases: %s',
                self.info.teams[team].possible_name)

    def _fetch_board_count(self):
        # Derive running state from board counts: -1 once all boards are
        # played, otherwise the number played so far.
        boards_played, boards_to_play = self.call_client(
            'board_count', (0, 0))
        if boards_played > 0:
            self.info.running = -1 \
                if boards_played >= boards_to_play \
                else boards_played

    def _determine_outcome(self):
        """Set winner/loser (finished match) or possible_winner/loser
        (match in progress) when both sides have exactly one known team.
        NOTE(review): a finished tie falls into the else branch and awards
        the win to team 2 — confirm that is intended."""
        if (self.info.teams[0].known_teams == 1) \
           and (self.info.teams[1].known_teams == 1):
            teams = [
                team.name[max(0, team.selected_team)]
                for team in self.info.teams
            ]
            if self.info.running == -1:
                if self.info.teams[0].score > self.info.teams[1].score:
                    self.info.winner = teams[0]
                    self.info.loser = teams[1]
                else:
                    self.info.loser = teams[0]
                    self.info.winner = teams[1]
            elif self.info.running > 0:
                if self.info.teams[0].score > self.info.teams[1].score:
                    self.info.possible_winner = teams[0]
                    self.info.possible_loser = teams[1]
                elif self.info.teams[0].score < self.info.teams[1].score:
                    self.info.possible_loser = teams[0]
                    self.info.possible_winner = teams[1]

    def _determine_running_link(self):
        # Swap the static link for a live-results link while running.
        if self.info.link is None:
            return
        self.info.link = self.call_client('running_link', self.info.link)

    def set_phase_link(self, phase_link):
        """Resolve this match's link against the phase link; '#' is a
        deliberate dead link and is left alone. Re-initializes the client
        list when the effective link changes."""
        prev_link = self.info.link
        if self.info.link is None:
            self.info.link = phase_link
        else:
            if self.info.link != '#':
                self.info.link = urljoin(phase_link, self.info.link)
        PlayoffLogger.get('matchinfo').info(
            'applying phase link %s to match #%d: %s',
            phase_link, self.info.id, self.info.link)
        # re-init result info clients
        if (prev_link != self.info.link) and (self.info.link is not None):
            PlayoffLogger.get('matchinfo').info(
                'config link changed, re-initializing result info client list')
            self.config['link'] = self.info.link
            ResultInfo.__init__(self, self.config, self.database)

    def get_info(self):
        """Assemble and return the fully-populated Match DTO."""
        self._fetch_teams_with_scores()
        self._fetch_board_count()
        self._determine_outcome()
        if self.info.running > 0:
            self._determine_running_link()
        return self.info
# -*- coding: utf-8 -*-


class Singleton:
    """Lazy singleton base.

    The first ``instance(...)`` call constructs the object; it then rebinds
    ``instance`` on the class to a plain getter, so every later call returns
    the cached object (constructor arguments are ignored from then on).
    """

    _instance = None

    @classmethod
    def _getInstance(cls):
        # Cheap accessor installed in place of `instance` after first use.
        return cls._instance

    @classmethod
    def instance(cls, *args, **kargs):
        obj = cls(*args, **kargs)
        cls._instance = obj
        # Swap this factory out for the getter: subsequent lookups of
        # `cls.instance` resolve to `_getInstance`.
        cls.instance = cls._getInstance
        return obj
import numpy as np
import pandas as pd
import pytest

from prereise.gather.hydrodata.eia.decompose_profile import get_profile_by_state


def test_get_profile_argument_type():
    """Wrong argument types (non-Series profile, non-str state) raise
    TypeError."""
    arg = ((1, "WA"), (pd.Series(dtype=np.float64), 1))
    for a in arg:
        with pytest.raises(TypeError):
            get_profile_by_state(a[0], a[1])


def test_get_profile_argument_value():
    """A state name outside the expected set raises ValueError."""
    a = (pd.Series(dtype=np.float64), "Canada")
    with pytest.raises(ValueError):
        get_profile_by_state(a[0], a[1])
import rx
import rx.operators as ops
import rxsci as rs


def test_lag1():
    """lag(1) pairs each item with its predecessor; the first item is paired
    with itself."""
    source = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    actual_result = []
    expected_result = [
        (1, 1),
        (1, 2),
        (2, 3),
        (3, 4),
        (4, 5),
        (5, 6),
        (6, 7),
        (7, 8),
        (8, 9),
    ]
    rx.from_(source).pipe(
        rs.state.with_memory_store(
            rs.data.lag(1),
        ),
    ).subscribe(on_next=actual_result.append)

    assert actual_result == expected_result


def test_lag_without_store():
    """lag(n>1) without a state store must error out with ValueError."""
    source = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    actual_error = []

    rx.from_(source).pipe(
        rs.data.lag(3),
    ).subscribe(on_error=actual_error.append)

    assert type(actual_error[0]) is ValueError


def test_lag1_without_store():
    """Even lag(1) requires a state store."""
    source = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    actual_error = []

    rx.from_(source).pipe(
        rs.data.lag(1),
    ).subscribe(on_error=actual_error.append)

    assert type(actual_error[0]) is ValueError


def test_lag1_mux():
    """lag(1) on a muxed stream lags per-key and preserves lifecycle
    events."""
    source = [
        rs.OnCreateMux((1 ,None)),
        rs.OnNextMux((1, None), 1),
        rs.OnNextMux((1, None), 2),
        rs.OnNextMux((1, None), 3),
        rs.OnNextMux((1, None), 4),
        rs.OnCompletedMux((1, None)),
    ]
    actual_result = []

    rx.from_(source).pipe(
        rs.cast_as_mux_observable(),
        rs.state.with_memory_store(
            rs.data.lag(1),
        ),
    ).subscribe(on_next=actual_result.append)

    assert actual_result == [
        rs.OnCreateMux((1 ,None)),
        rs.OnNextMux((1, None), (1,1)),
        rs.OnNextMux((1, None), (1,2)),
        rs.OnNextMux((1, None), (2,3)),
        rs.OnNextMux((1, None), (3,4)),
        rs.OnCompletedMux((1, None)),
    ]


def test_lag():
    """lag(2) pairs each item with the value two steps back, clamping to the
    first item while the window fills."""
    source = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    actual_result = []
    expected_result = [
        (1, 1),
        (1, 2),
        (1, 3),
        (2, 4),
        (3, 5),
        (4, 6),
        (5, 7),
        (6, 8),
        (7, 9),
    ]
    rx.from_(source).pipe(
        rs.state.with_memory_store(
            rs.data.lag(2),
        ),
    ).subscribe(on_next=actual_result.append)

    assert actual_result == expected_result


def test_lag_mux():
    """lag(2) behaves the same per key on a muxed stream."""
    source = [
        rs.OnCreateMux((1 ,None)),
        rs.OnNextMux((1, None), 1),
        rs.OnNextMux((1, None), 2),
        rs.OnNextMux((1, None), 3),
        rs.OnNextMux((1, None), 4),
        rs.OnNextMux((1, None), 5),
        rs.OnNextMux((1, None), 6),
        rs.OnCompletedMux((1, None)),
    ]
    actual_result = []

    rx.from_(source).pipe(
        rs.cast_as_mux_observable(),
        rs.state.with_memory_store(
            rs.data.lag(2),
        ),
    ).subscribe(on_next=actual_result.append)

    assert actual_result == [
        rs.OnCreateMux((1 ,None)),
        rs.OnNextMux((1, None), (1, 1)),
        rs.OnNextMux((1, None), (1, 2)),
        rs.OnNextMux((1, None), (1, 3)),
        rs.OnNextMux((1, None), (2, 4)),
        rs.OnNextMux((1, None), (3, 5)),
        rs.OnNextMux((1, None), (4, 6)),
        rs.OnCompletedMux((1, None)),
    ]
import re
import string


def extract_tags(txt):
    """
    Create tags for flashes.
    :param txt: flash content (Hebrew free text)
    :return: comma-joined tags derived from the flash
    """
    # Drop punctuation so word matching is not thrown off by commas etc.
    txt = txt.translate(str.maketrans("", "", string.punctuation))

    easy_injuries = ['קל']
    hard_injuries = ['קשה', 'אנוש']
    middle_injuries = ['בינוני']
    injuries_words = ['נפצע','פצוע']
    dead_words = ['הרג', 'מות', 'הרוג', 'מוות', 'קטלני']
    veichles = ['אופנוע','רכב', 'אופניים', 'משאית', 'אוטובוס', 'טרקטורון',
                'קורקינט', 'טרקטור', 'אופניים חשמליים', 'קורקינט חשמלי']
    men_words = ['גבר', 'צעיר', 'נהג' , 'הולך רגל', 'ילד', 'נער', 'פעוט',
                 'תינוק', 'קשיש', 'רוכב']
    women_words = ['אישה', 'צעירה', 'נהגת', 'הולכת רגל', 'ילדה', 'נערה',
                   'פעוטה', 'תינוקת', 'קשישה', 'רוכבת', 'אשה', 'נשים']

    # Whole-word-ish match: optional leading whitespace, trailing space.
    word_re = r'\s?{} '

    def has_word(options):
        return any(re.search(word_re.format(option), txt) for option in options)

    def has_substring(options):
        return any(option in txt for option in options)

    tags = []
    if has_word(easy_injuries):
        tags.append('פצועים-קל')
    if has_word(men_words):
        tags.append('גבר')
    if has_word(women_words):
        tags.append('אישה')
    if has_substring(hard_injuries):
        tags.append('פצועים-קשה')
    if has_substring(middle_injuries):
        tags.append('פצועים-בינוני')
    if has_substring(injuries_words):
        tags.append('פצועים')
    if has_substring(dead_words):
        tags.append('הרוגים')
    # Vehicle names are taken verbatim as tags, in list order.
    tags.extend(v for v in veichles if v in txt)
    return ','.join(tags)
import re

__all__ = ("load_config", "get_setting_value")


def load_from_django():
    """Build the webpack loader config from Django's settings.

    User overrides come from ``settings.WEBPACK_LOADER``; ``IGNORE`` patterns
    are pre-compiled into ``ignores``.
    """
    from django.conf import settings

    DEFAULT_CONFIG = {
        "CACHE": not settings.DEBUG,
        "IGNORE": [r".+\.hot-update.js", r".+\.map"],
        "LOADER_CLASS": "webpack_boilerplate.loader.WebpackLoader",
    }
    user_config = dict(DEFAULT_CONFIG, **getattr(settings, "WEBPACK_LOADER", {}))
    user_config["ignores"] = [re.compile(I) for I in user_config["IGNORE"]]
    user_config["web_framework"] = "django"
    return user_config


def load_from_flask():
    """Build the webpack loader config from Flask's current_app config.

    Unlike the Django variant, ``WEBPACK_LOADER`` must exist in the app
    config (a missing key raises KeyError).
    """
    from flask import current_app

    DEFAULT_CONFIG = {
        "CACHE": not current_app.config["DEBUG"],
        "IGNORE": [r".+\.hot-update.js", r".+\.map"],
        "LOADER_CLASS": "webpack_boilerplate.loader.WebpackLoader",
    }
    user_config = dict(DEFAULT_CONFIG, **current_app.config["WEBPACK_LOADER"])
    user_config["ignores"] = [re.compile(I) for I in user_config["IGNORE"]]
    user_config["web_framework"] = "flask"
    return user_config


def load_config(name):
    """Load the loader config from whichever framework is importable,
    trying Django first, then Flask.

    ``name`` is kept for interface compatibility; it is not used.
    """
    try:
        import django

        return load_from_django()
    except ImportError:
        pass

    try:
        import flask

        return load_from_flask()
    except ImportError:
        pass

    raise Exception("can not load config from this project")


def get_setting_value(key):
    """Read a single framework setting; returns None when neither framework
    is importable (and, for Flask, when the mapped key is absent)."""
    try:
        import django
        from django.conf import settings

        # Fix: Django's settings object has no .get() method, so the previous
        # `settings.get(key, None)` raised AttributeError on every call.
        return getattr(settings, key, None)
    except ImportError:
        pass

    try:
        import flask
        from flask import current_app

        # Translation table from our setting names to Flask config keys.
        key_map = {"STATIC_URL": "STATIC_URL"}
        return current_app.config.get(key_map[key], None)
    except ImportError:
        pass


def setup_jinja2_ext(app):
    """
    Run by flask app: registers the webpack Jinja2 extension.
    """
    from .contrib.jinja2ext import WebpackExtension

    app.jinja_env.add_extension(WebpackExtension)
class Solution:
    # time: O(1)
    # space: O(1)
    def findComplement(self, num: int) -> int:
        """Flip every bit of num's binary representation (no leading zeros).

        E.g. 5 = 0b101 -> 0b010 = 2.
        """
        # All-ones mask exactly as wide as num (0 for num == 0).
        mask = (1 << num.bit_length()) - 1
        # XOR against the mask flips precisely those bits.
        return num ^ mask
import requests


class HTTP:
    """Minimal convenience wrapper around ``requests`` GET calls."""

    @staticmethod
    def get(url, return_json=True):
        """GET ``url`` and return its payload.

        :param url: URL to fetch.
        :param return_json: if True return the parsed JSON body, otherwise
            the raw response text.
        :return: parsed JSON (or text); on any non-200 status the failure is
            swallowed and an empty value is returned ({} in JSON mode, '' in
            text mode).
        """
        r = requests.get(url)
        # Best-effort semantics: callers get an empty payload instead of an
        # exception on HTTP errors. (Removed a stale commented-out variant of
        # this same logic.)
        if r.status_code != 200:
            return {} if return_json else ''
        return r.json() if return_json else r.text
# -*- coding: utf-8 -*-
import itertools
import re
from copy import copy

import pandas as pd
import requests

from zvt.contract.api import get_entity_code
from zvt.utils import to_pd_timestamp, normal_index_df

WORLD_BANK_URL = "http://api.worldbank.org/v2"

# thanks to https://github.com/mwouts/world_bank_data

# Friendly economy-indicator name -> World Bank indicator code.
_economy_indicator_map = {
    "population": "SP.POP.TOTL",
    "gdp": "NY.GDP.MKTP.CD",
    "gdp_per_capita": "NY.GDP.PCAP.CD",
    "gdp_per_employed": "SL.GDP.PCAP.EM.KD",
    "gdp_growth": "NY.GDP.MKTP.KD.ZG",
    "agriculture_growth": "NV.AGR.TOTL.KD.ZG",
    "industry_growth": "NV.IND.TOTL.KD.ZG",
    "manufacturing_growth": "NV.IND.MANF.KD.ZG",
    "service_growth": "NV.SRV.TOTL.KD.ZG",
    "consumption_growth": "NE.CON.TOTL.KD.ZG",
    "capital_growth": "NE.GDI.TOTL.KD.ZG",
    "exports_growth": "NE.EXP.GNFS.KD.ZG",
    "imports_growth": "NE.IMP.GNFS.KD.ZG",
    "gni": "NY.GNP.ATLS.CD",
    "gni_per_capita": "NY.GNP.PCAP.CD",
    "gross_saving": "NY.GNS.ICTR.ZS",
    "cpi": "FP.CPI.TOTL",
    "unemployment_rate": "SL.UEM.TOTL.ZS",
    "fdi_of_gdp": "BX.KLT.DINV.WD.GD.ZS",
}


def _collapse(values):
    """Collapse multiple values to a colon-separated list of values"""
    if isinstance(values, str):
        return values
    if values is None:
        return "all"
    if isinstance(values, list):
        return ";".join([_collapse(v) for v in values])
    return str(values)


def _extract_preferred_field(data, id_or_value):
    """In case the preferred representation of data when the latter has
    multiple representations (dict with id/value, or list thereof)."""
    if not id_or_value:
        return data

    if not data:
        return ""

    if isinstance(data, dict):
        if id_or_value in data:
            return data[id_or_value]

    if isinstance(data, list):
        return ",".join([_extract_preferred_field(i, id_or_value) for i in data])

    return data


def _wb_get(paths: dict = None, **kwargs):
    """Issue a request to the World Bank v2 API and return the JSON payload.

    :param paths: mapping of path segment -> value(s); values are collapsed
        with _collapse and joined into the request URL.
    :param kwargs: extra query parameters (format/per_page get defaults).
    :raises ValueError: when the API reports an error message.
    :raises RuntimeError: when the request returns no data.
    """
    params = copy(kwargs)
    params.setdefault("format", "json")
    params.setdefault("per_page", 20000)

    url = "/".join([WORLD_BANK_URL] + list(itertools.chain.from_iterable([(k, _collapse(paths[k])) for k in paths])))

    response = requests.get(url=url, params=params)
    response.raise_for_status()
    try:
        data = response.json()
    except ValueError:
        raise ValueError(
            "{msg}\nurl={url}\nparams={params}".format(msg=_extract_message(response.text), url=url, params=params)
        )

    if isinstance(data, list) and data and "message" in data[0]:
        try:
            msg = data[0]["message"][0]["value"]
        except (KeyError, IndexError):
            # Fix: previous code read `msg` before assignment here, raising
            # NameError instead of the intended API error; fall back to the
            # raw message payload.
            msg = str(data[0]["message"])
        raise ValueError("{msg}\nurl={url}\nparams={params}".format(msg=msg, url=url, params=params))

    # Redo the request and get the full information when the first response
    # is incomplete (the API paginates: [page_info, rows]).
    if isinstance(data, list):
        page_information, data = data
        if "page" not in params:
            current_page = 1
            while current_page < int(page_information["pages"]):
                params["page"] = current_page = int(page_information["page"]) + 1
                response = requests.get(url=url, params=params)
                response.raise_for_status()
                page_information, new_data = response.json()
                data.extend(new_data)

    if not data:
        raise RuntimeError("The request returned no data:\nurl={url}\nparams={params}".format(url=url, params=params))

    return data


def _extract_message(msg):
    """Extract the human-readable error from a World Bank XML error body.

    '<?xml version="1.0" encoding="utf-8"?>
    <wb:error xmlns:wb="http://www.worldbank.org">
      <wb:message id="175" key="Invalid format">The indicator was not found.
      It may have been deleted or archived.</wb:message>
    </wb:error>'
    """
    if "wb:message" not in msg:
        return msg
    return re.sub(
        re.compile(".*<wb:message[^>]*>", re.DOTALL), "", re.sub(re.compile("</wb:message>.*", re.DOTALL), "", msg)
    )


def _get_meta(name, filters=None, expected=None, **params):
    """Request metadata and return it in the form of a data frame.

    :param name: API path segment (e.g. 'country', 'region').
    :param filters: id or list of ids to restrict the query ('all' if None).
    :param expected: when set, 'value' must be one of these representations.
    :param params: forwarded verbatim as query parameters to the API
        (requests drops None-valued entries).
    """
    filters = _collapse(filters)
    id_or_value = "value"

    if expected and id_or_value not in expected:
        raise ValueError("'id_or_value' should be one of '{}'".format("', '".join(expected)))

    data = _wb_get(paths={name: filters}, **params)

    # We get a list (countries) of dictionary (properties)
    columns = data[0].keys()
    records = {}

    for col in columns:
        records[col] = [_extract_preferred_field(cnt[col], id_or_value) for cnt in data]

    return pd.DataFrame(records, columns=columns)


def get_countries():
    """Return all countries as a DataFrame with zvt entity columns
    (entity_type/exchange/entity_id/id) filled in."""
    df = _get_meta("country", expected=["id", "iso2code", "value"])
    for col in ["latitude", "longitude"]:
        df[col] = pd.to_numeric(df[col])
    df.rename(
        columns={
            "iso2Code": "code",
            "incomeLevel": "income_level",
            "lendingType": "lending_type",
            "capitalCity": "capital_city",
        },
        inplace=True,
    )
    df["entity_type"] = "country"
    df["exchange"] = "galaxy"
    df["entity_id"] = df[["entity_type", "exchange", "code"]].apply(lambda x: "_".join(x.astype(str)), axis=1)
    df["id"] = df["entity_id"]
    return df


def get_indicators(indicator=None, language=None, id_or_value=None, **params):
    """Return a DataFrame that describes one, multiple or all indicators,
    indexed by the indicator id.

    :param indicator: None (all indicators), the id of an indicator, or a list of multiple ids
    :param language: Desired language
    :param id_or_value: Choose either 'id' or 'value' for columns 'source' and 'topics'"""
    if id_or_value == "iso2code":
        id_or_value = "id"

    # NOTE(review): `language` and `id_or_value` fall into _get_meta's
    # **params and are sent to the API as query parameters — confirm the API
    # honors them in this form.
    return _get_meta(
        "indicator", indicator, language=language, id_or_value=id_or_value, expected=["id", "value"], **params
    )


def get_indicator_data(indicator, indicator_name=None, country=None, date=None):
    """Fetch one indicator's time series and return it as a DataFrame
    indexed by (country code, timestamp).

    :param indicator: World Bank indicator code.
    :param indicator_name: optional column name to use instead of the code.
    :param country: country id(s) or None for all.
    :param date: date or date-range filter accepted by the API.
    """
    datas = _wb_get(paths={"country": country, "indicator": indicator}, date=date)

    records = [
        {
            "code": item["country"]["id"],
            "timestamp": to_pd_timestamp(item["date"]),
            item["indicator"]["id"] if not indicator_name else indicator_name: item["value"],
        }
        for item in datas
    ]

    df = pd.DataFrame.from_records(data=records)
    df = df.set_index(["code", "timestamp"])
    return df


def get_regions(region=None, language=None, **params):
    """Return a DataFrame that describes one, multiple or all regions, indexed by the region id.

    :param region: None (all regions), the id of a region, or a list of multiple ids
    :param language: Desired language"""
    # Fix: `language` was passed positionally into _get_meta's `expected`
    # parameter, which raised a spurious ValueError whenever a language was
    # supplied; forward it as a query parameter instead.
    return _get_meta("region", region, language=language, **params)


def get_sources(source=None, language=None, **params):
    """Return a DataFrame that describes one, multiple or all sources, indexed by the source id.

    :param source: None (all sources), the id of a source, or a list of multiple ids
    :param language: Desired language"""
    # Fix: `language` no longer lands in `expected` (see get_regions).
    return _get_meta("source", source, language=language, **params)


def get_topics(topic=None, language=None, **params):
    """Return a DataFrame that describes one, multiple or all sources, indexed by the source id.

    :param topic: None (all topics), the id of a topic, or a list of multiple ids
    :param language: Desired language"""
    # Fix: `language` no longer lands in `expected` (see get_regions).
    return _get_meta("topic", topic, language=language, **params)


def get_incomelevels(incomelevel=None, language=None, **params):
    """Return a DataFrame that describes one, multiple or all income levels, indexed by the IL id.

    :param incomelevel: None (all income levels), the id of an income level, or a list of multiple ids
    :param language: Desired language"""
    # Fix: `language` no longer lands in `expected` (see get_regions).
    return _get_meta("incomelevel", incomelevel, language=language, **params)


def get_lendingtypes(lendingtype=None, language=None, **params):
    """Return a DataFrame that describes one, multiple or all lending types, indexed by the LT id.

    :param lendingtype: None (all lending types), the id of a lending type, or a list of multiple ids
    :param language: Desired language"""
    # Fix: `language` no longer lands in `expected` (see get_regions).
    return _get_meta("lendingtype", lendingtype, language=language, **params)


def get_economy_data(entity_id, indicators=None, date=None):
    """Fetch several indicators for one country and return them merged into
    a single normalized DataFrame.

    :param entity_id: zvt country entity id (e.g. 'country_galaxy_CN').
    :param indicators: iterable of friendly indicator names; defaults to all
        of _economy_indicator_map.
    :param date: date or date-range filter accepted by the API.
    """
    country = get_entity_code(entity_id=entity_id)
    if not indicators:
        indicators = _economy_indicator_map.keys()
    dfs = []
    for indicator in indicators:
        data = get_indicator_data(
            indicator=_economy_indicator_map.get(indicator), indicator_name=indicator, country=country, date=date
        )
        dfs.append(data)

    df = pd.concat(dfs, axis=1)
    df = df.reset_index(drop=False)
    df["entity_id"] = entity_id
    df["id"] = df[["entity_id", "timestamp"]].apply(lambda x: "_".join(x.astype(str)), axis=1)
    df = normal_index_df(df, drop=False)
    return df


if __name__ == "__main__":
    # df = get_countries()
    # print(df)
    df = get_economy_data(entity_id="country_galaxy_CN")
    print(df)
    # df = get_sources()
    # print(df)


# the __all__ is generated
__all__ = [
    "get_countries",
    "get_indicators",
    "get_indicator_data",
    "get_regions",
    "get_sources",
    "get_topics",
    "get_incomelevels",
    "get_lendingtypes",
    "get_economy_data",
]
from .ParentClass import ParentClass from .ParentPlural import ParentPlural from .ParentPluralDict import ParentPluralDict from .ParentPluralList import ParentPluralList
import sys

PY3 = sys.version_info[0] == 3

# zcml is not yet compatible with python 3, but we need to provide compatible
# syntax in case this module gets imported by the test runner under python 3

if not PY3:
    from pyramid_zcml import IRouteLikeDirective
    from pyramid_zcml import with_context
    from zope.schema import TextLine
    from zope.configuration.fields import GlobalObject
    from pyramid.exceptions import ConfigurationError
    from pyramid_handlers import add_handler

    def u(val):
        # Python-2-only helper: coerce to unicode for zope.schema titles.
        return unicode(val)

    class IHandlerDirective(IRouteLikeDirective):
        # ZCML schema for the <handler> directive: the route-like fields plus
        # the handler class and an optional action.
        route_name = TextLine(title=u('route_name'), required=True)
        handler = GlobalObject(title=u('handler'), required=True)
        action = TextLine(title=u("action"), required=False)

    def handler(_context, route_name, pattern, handler, action=None,
                view=None, view_for=None, permission=None, factory=None,
                for_=None, header=None, xhr=False, accept=None,
                path_info=None, request_method=None, request_param=None,
                custom_predicates=(), view_permission=None, view_attr=None,
                renderer=None, view_renderer=None, view_context=None,
                traverse=None, use_global_views=False):
        """ Handle ``handler`` ZCML directives """
        # the strange ordering of the request kw args above is for b/w
        # compatibility purposes.
        # these are route predicates; if they do not match, the next route
        # in the routelist will be tried

        # Newer argument names win over their deprecated aliases.
        if view_context is None:
            view_context = view_for or for_
        view_permission = view_permission or permission
        view_renderer = view_renderer or renderer

        if pattern is None:
            raise ConfigurationError('handler directive must include a '
                                     '"pattern"')

        config = with_context(_context)
        # Lazily register the add_handler directive on first use.
        if not hasattr(config, 'add_handler'):
            config.add_directive('add_handler', add_handler)
        config.add_handler(
            route_name, pattern, handler, action=action, factory=factory,
            header=header, xhr=xhr, accept=accept, path_info=path_info,
            request_method=request_method, request_param=request_param,
            custom_predicates=custom_predicates, view=view,
            view_context=view_context, view_permission=view_permission,
            view_renderer=view_renderer, view_attr=view_attr,
            use_global_views=use_global_views, traverse=traverse,
        )
from seguridad.relaciones_models import *
from seguridad.maestros_models import *

# menu = ReMenu.objects.filter(padre_id__isnull=True).values()
# for hijo in menu:


def menu_prueba():
    """Demo: build a parent/child menu tree from a hard-coded flat list
    (rows with padre_id=None are roots). NOTE: this is Python 2 code
    (print statements)."""
    obj = [{'id': 1, 'nombre': 'Luis Farfan', 'padre_id': None},
           {'id': 2, 'nombre': 'Ana Vega', 'padre_id': None},
           {'id': 3, 'nombre': 'Mayra Diaz', 'padre_id': None},
           {'id': 4, 'nombre': 'Eduardo Cabrera', 'padre_id': None},
           {'id': 5, 'nombre': 'Gustavo Peralta', 'padre_id': 1},
           {'id': 6, 'nombre': 'Omar Hernandez', 'padre_id': 1},
           {'id': 7, 'nombre': 'Gustavo Peralta', 'padre_id': 1},
           {'id': 8, 'nombre': 'Diego Paz', 'padre_id': 5},
           {'id': 9, 'nombre': 'Marco', 'padre_id': 5}]
    # Split roots from children.
    nones = []
    withids = []
    for menus in obj:
        if menus.get('padre_id') == None:
            nones.append(menus)
        else:
            withids.append(menus)
    list_nones = list(nones)
    list_withids = list(withids)
    print list_nones
    print '**' * 30
    for k, v in enumerate(list_nones):
        for key, valor in enumerate(list_withids):
            if valor['padre_id'] == v['id']:
                # NOTE(review): 'items' is overwritten on every matching
                # child, so only the LAST child survives — presumably this
                # should accumulate a list; confirm before reusing.
                list_nones[k]['items'] = list_withids[key]
    print list_nones


def procedure():
    """Call the getMenubyUser stored procedure (user id hard-coded to 3)
    and return its rows as a list of column-name -> value dicts."""
    from django.db import connection
    cursor = connection.cursor()
    cursor.execute('exec getMenubyUser %s', [3])
    # Zip each row with the cursor's column names.
    columns = [column[0] for column in cursor.description]
    results = []
    for row in cursor.fetchall():
        results.append(dict(zip(columns, row)))
    results = list(results)
    return results

#print type([])

# Module-level demo: fetch the menu and attach each parent's children under
# a 'hijos' key, then dump the result.
menu=procedure()
padres = []
hijos = []
for k,v in enumerate(menu):
    if menu[k]['PADRE_ID'] == None:
        padres.append(menu[k])
    else:
        hijos.append(menu[k])

for k,v in enumerate(padres):
    listadict = []
    for kk,vv in enumerate(hijos):
        if hijos[kk]['PADRE_ID']==padres[k]['ID_MENU']:
            listadict.append(dict(hijos[kk]))
    padres[k]['hijos']=listadict
print padres
from io import BytesIO as _BytesIO from copy import deepcopy from abc import ABCMeta, abstractmethod import bitcoin.core from bitcoin.core.script import * from binascii import unhexlify, hexlify from base58 import b58encode, b58decode from hashlib import sha256 from ecdsa import SigningKey from ecdsa.util import sigencode_der_canonize, number_to_string from urllib.request import urlopen, Request from json import loads import sys _bchr = chr _bord = ord # if sys.version > '3': long = int def _bchr(x): return bytes([x]) def _bord(x): return x # else: # if Python 2 # from cStringIO import StringIO as _BytesIO # keys.py def ecdsa_tx_sign(unsigned_tx, sk, hashflag=SIGHASH_ALL, deterministic=True): """ Performs and ECDSA sign over a given transaction using a given secret key. :param unsigned_tx: unsigned transaction that will be double-sha256 and signed. :type unsigned_tx: hex str :param sk: ECDSA private key that will sign the transaction. :type sk: SigningKey :param hashflag: hash type that will be used during the signature process and will identify the signature format. :type hashflag: int :param deterministic: Whether the signature is performed using a deterministic k or not. Set by default. :type deterministic: bool :return: """ # Encode the hash type as a 4-byte hex value. if hashflag in [SIGHASH_ALL, SIGHASH_SINGLE, SIGHASH_NONE]: hc = int2bytes(hashflag, 4) else: raise Exception("Wrong hash flag.") # ToDo: Deal with SIGHASH_ANYONECANPAY # sha-256 the unsigned transaction together with the hash type (little endian). h = sha256(unhexlify(unsigned_tx + change_endianness(hc))).digest() # Sign the transaction (using a sha256 digest, that will conclude with the double-sha256) # If deterministic is set, the signature will be performed deterministically choosing a k from the given transaction if deterministic: s = sk.sign_deterministic( h, hashfunc=sha256, sigencode=sigencode_der_canonize) # Otherwise, k will be chosen at random. 
Notice that this can lead to a private key disclosure if two different # messages are signed using the same k. else: s = sk.sign(h, hashfunc=sha256, sigencode=sigencode_der_canonize) # Finally, add the hashtype to the end of the signature as a 2-byte big endian hex value. return hexlify(s) + hc[-2:] def serialize_pk(pk, compressed=True): """ Serializes a ecdsa.VerifyingKey (public key). :param compressed: Indicates if the serialized public key will be either compressed or uncompressed. :type compressed: bool :param pk: ECDSA VerifyingKey object (public key to be serialized). :type pk: ecdsa.VerifyingKey :return: serialized public key. :rtype: hex str """ # Updated with code based on PR #54 from python-ecdsa until the PR gets merged: # https://github.com/warner/python-ecdsa/pull/54 x_str = number_to_string(pk.pubkey.point.x(), pk.pubkey.order) if compressed: if pk.pubkey.point.y() & 1: prefix = '03' else: prefix = '02' s_key = prefix + hexlify(x_str) else: s_key = '04' + hexlify(pk.to_string()) return s_key # utils.py def parse_script_type(t): """ Parses a script type obtained from a query to blockcyper's API. :param t: script type to be parsed. :type t: str :return: The parsed script type. :rtype: str """ if t == 'pay-to-multi-pubkey-hash': r = "P2MS" elif t == 'pay-to-pubkey': r = "P2PK" elif t == 'pay-to-pubkey-hash': r = "P2PKH" elif t == 'pay-to-script-hash': r = "P2PSH" else: r = "unknown" return r def get_prev_ScriptPubKey(tx_id, index, network='test'): """ Gets the ScriptPubKey of a given transaction id and its type, by querying blockcyer's API. :param tx_id: Transaction identifier to be queried. :type tx_id: hex str :param index: Index of the output from the transaction. :type index: int :param network: Network in which the transaction can be found (either mainnet or testnet). :type network: hex str :return: The corresponding ScriptPubKey and its type. 
:rtype hex str, str """ if network in ['main', 'mainnet']: base_url = "https://api.blockcypher.com/v1/btc/main/txs/" elif network in ['test', 'testnet']: base_url = "https://api.blockcypher.com/v1/btc/test3/txs/" else: raise Exception("Bad network.") request = Request(base_url + tx_id) header = 'User-agent', 'Mozilla/5.0' request.add_header("User-agent", header) r = urlopen(request) data = loads(r.read()) script = data.get('outputs')[index].get('script') t = data.get('outputs')[index].get('script_type') return script, parse_script_type(t) def encode_varint(value): """ Encodes a given integer value to a varint. It only used the four varint representation cases used by bitcoin: 1-byte, 2-byte, 4-byte or 8-byte integers. :param value: The integer value that will be encoded into varint. :type value: int :return: The varint representation of the given integer value. :rtype: str """ # The value is checked in order to choose the size of its final representation. # 0xFD(253), 0xFE(254) and 0xFF(255) are special cases, since are the prefixes defined for 2-byte, 4-byte # and 8-byte long values respectively. if value < pow(2, 8) - 3: size = 1 varint = int2bytes(value, size) # No prefix else: if value < pow(2, 16): size = 2 prefix = 253 # 0xFD elif value < pow(2, 32): size = 4 prefix = 254 # 0xFE elif value < pow(2, 64): size = 8 prefix = 255 # 0xFF else: raise Exception("Wrong input data size") varint = format(prefix, 'x') + \ change_endianness(int2bytes(value, size)) return varint def decode_varint(varint): """ Decodes a varint to its standard integer representation. :param varint: The varint value that will be decoded. :type varint: str :return: The standard integer representation of the given varint. :rtype: int """ # The length of the varint is check to know whether there is a prefix to be removed or not. 
    # Varints longer than one byte carry a one-byte prefix that is dropped
    # before decoding the little-endian payload.
    if len(varint) > 2:
        decoded_varint = int(change_endianness(varint[2:]), 16)
    else:
        decoded_varint = int(varint, 16)

    return decoded_varint


def int2bytes(a, b):
    """ Converts a given integer value (a) its b-byte representation, in hex format.

    :param a: Value to be converted.
    :type a: int
    :param b: Byte size to be filled.
    :type b: int
    :return: The b-bytes representation of the given value (a) in hex format.
    :rtype: hex str
    """

    # Largest value representable in b bytes; anything bigger cannot be encoded.
    m = pow(2, 8*b) - 1
    if a > m:
        raise Exception(str(a) + " is too big to be represented with " + str(b)
                        + " bytes. Maximum value is " + str(m) + ".")

    # Zero-padded hex string, two characters per byte.
    return ('%0' + str(2 * b) + 'x') % a


def parse_varint(tx):
    """ Parses a given transaction for extracting an encoded varint element.

    :param tx: Transaction where the element will be extracted.
    :type tx: TX
    :return: The b-bytes representation of the given value (a) in hex format.
    :rtype: hex str
    """

    # First of all, the offset of the hex transaction if moved to the proper position (i.e where the varint should be
    # located) and the length and format of the data to be analyzed is checked.
    data = tx.hex[tx.offset:]
    assert (len(data) > 0)
    size = int(data[:2], 16)
    assert (size <= 255)

    # Then, the integer is encoded as a varint using the proper prefix, if needed.
    if size <= 252:  # No prefix
        storage_length = 1
    elif size == 253:  # 0xFD
        storage_length = 3
    elif size == 254:  # 0xFE
        storage_length = 5
    elif size == 255:  # 0xFF
        storage_length = 9
    else:
        raise Exception("Wrong input data size")

    # Finally, the storage length is used to extract the proper number of bytes from the transaction hex and the
    # transaction offset is updated.
    varint = data[:storage_length * 2]
    tx.offset += storage_length * 2

    return varint


def parse_element(tx, size):
    """ Parses a given transaction to extract an element of a given size.

    :param tx: Transaction where the element will be extracted.
    :type tx: TX
    :param size: Size of the parameter to be extracted.
    :type size: int
    :return: The extracted element.
:rtype: hex str """ element = tx.hex[tx.offset:tx.offset + size * 2] tx.offset += size * 2 return element def change_endianness(x): """ Changes the endianness (from BE to LE and vice versa) of a given value. :param x: Given value which endianness will be changed. :type x: hex str :return: The opposite endianness representation of the given value. :rtype: hex str """ # If there is an odd number of elements, we make it even by adding a 0 if (len(x) % 2) == 1: x += "0" y = hexlify(unhexlify(x)[::-1]).decode() # y = x.encode() # .decode('hex') # z = hexlify(y[::-1]).decode() return y def check_signature(signature): """ Checks if a given string is a signature (or at least if it is formatted as if it is). :param signature: Signature to be checked. :type signature: hex str :return: True if the signatures matches the format, raise exception otherwise. :rtype: bool """ l = (len(signature[4:]) - 2) / 2 if signature[:2] != "30": raise Exception("Wrong signature format.") elif int(signature[2:4], 16) != l: raise Exception("Wrong signature length " + str(l)) else: return True def check_script(script): """ Checks if a given string is a script (hash160) (or at least if it is formatted as if it is). :param script: Script to be checked. :type script: hex str :return: True if the signatures matches the format, raise exception otherwise. :rtype: bool """ if not isinstance(script, str): raise Exception("Wrong script format.") elif len(script)/2 != 20: raise Exception("Wrong signature length " + str(len(script)/2)) else: return True def check_address(btc_addr, network='test'): """ Checks if a given string is a Bitcoin address for a given network (or at least if it is formatted as if it is). :param btc_addr: Bitcoin address to be checked. :rtype: hex str :param network: Network to be checked (either mainnet or testnet). :type network: hex str :return: True if the Bitcoin address matches the format, raise exception otherwise. 
""" if network in ['test', "testnet"] and btc_addr[0] not in ['m', 'n']: raise Exception("Wrong testnet address format.") elif network in ['main', 'mainnet'] and btc_addr[0] != '1': raise Exception("Wrong mainnet address format.") elif network not in ['test', 'testnet', 'main', 'mainnet']: raise Exception("Network must be test/testnet or main/mainnet") elif len(btc_addr) not in range(26, 35+1): raise Exception( "Wrong address format, Bitcoin addresses should be 27-35 hex char long.") else: return True def check_public_key(pk): """ Checks if a given string is a public (or at least if it is formatted as if it is). :param pk: ECDSA public key to be checked. :type pk: hex str :return: True if the key matches the format, raise exception otherwise. :rtype: bool """ prefix = pk[0:2] l = len(pk) if prefix not in ["02", "03", "04"]: raise Exception("Wrong public key format.") if prefix == "04" and l != 130: raise Exception( "Wrong length for an uncompressed public key: " + str(l)) elif prefix in ["02", "03"] and l != 66: raise Exception("Wrong length for a compressed public key: " + str(l)) else: return True def is_public_key(pk): """ Encapsulates check_public_key function as a True/False option. :param pk: ECDSA public key to be checked. :type pk: hex str :return: True if pk is a public key, false otherwise. """ try: return check_public_key(pk) except: return False def is_btc_addr(btc_addr, network='test'): """ Encapsulates check_address function as a True/False option. :param btc_addr: Bitcoin address to be checked. :type btc_addr: hex str :param network: The network to be checked (either mainnet or testnet). :type network: str :return: True if btc_addr is a public key, false otherwise. """ try: return check_address(btc_addr, network) except: return False def is_script(script): """ Encapsulates check_script function as a True/False option. :param script: Script to be checked. :type script: hex str :return: True if script is a script, false otherwise. 
""" try: return check_script(script) except: return False # wallet.py def btc_addr_to_hash_160(btc_addr): """ Calculates the RIPEMD-160 hash from a given Bitcoin address :param btc_addr: Bitcoin address. :type btc_addr: str :return: The corresponding RIPEMD-160 hash. :rtype: hex str """ # Base 58 decode the Bitcoin address. decoded_addr = b58decode(btc_addr) # Covert the address from bytes to hex. decoded_addr_hex = hexlify(decoded_addr) # Obtain the RIPEMD-160 hash by removing the first and four last bytes of the decoded address, corresponding to # the network version and the checksum of the address. h160 = decoded_addr_hex[2:-8] return h160 # Classes class Script: """ Defines the class Script which includes two subclasses, InputScript and OutputScript. Every script type have two custom 'constructors' (from_hex and from_human), and four templates for the most common standard script types (P2PK, P2PKH, P2MS and P2PSH). """ __metaclass__ = ABCMeta def __init__(self): self.content = "" self.type = "unknown" @classmethod def from_hex(cls, hex_script): """ Builds a script from a serialized one (it's hexadecimal representation). :param hex_script: Serialized script. :type hex_script: hex str :return: Script object with the serialized script as it's content. :rtype Script """ script = cls() script.content = hex_script return script @classmethod def from_human(cls, data): """ Builds a script from a human way of writing them, using the Bitcoin Scripting language terminology. e.g: OP_DUP OP_HASH160 <hash_160> OP_EQUALVERIFY OP_CHECKSIG Every piece of data included in the script (everything except for op_codes) must be escaped between '<' '>'. :param data: Human readable Bitcoin Script (with data escaped between '<' '>') :type data: hex str :return: Script object with the serialization from the input script as it's content. :rtype. 
hex Script """ script = cls() script.content = script.serialize(data) return script @staticmethod def deserialize(script): """ Deserializes a serialized script (goes from hex to human). e.g: deserialize('76a914b34bbaac4e9606c9a8a6a720acaf3018c9bc77c988ac') = OP_DUP OP_HASH160 <b34bbaac4e9606c9a8a6a720acaf3018c9bc77c9> OP_EQUALVERIFY OP_CHECKSIG :param script: Serialized script to be deserialized. :type script: hex str :return: Deserialized script :rtype: hex str """ start = "CScript([" end = "])" ps = CScript(unhexlify(script)).__repr__() ps = ps[ps.index(start) + len(start): ps.index(end)].split(", ") for i in range(len(ps)): if ps[i].startswith('x('): ps[i] = ps[i][3:-2] ps[i] = '<' + ps[i] + '>' return " ".join(ps) @staticmethod def serialize(data): """ Serializes a scrip from a deserialized one (human readable) (goes from human to hex) :param data: Human readable script. :type data: hex str :return: Serialized script. :rtype: hex str """ hex_string = "" for e in data.split(" "): if e[0] == "<" and e[-1] == ">": hex_string += hexlify( CScriptOp.encode_op_pushdata(unhexlify(e[1:-1]))) elif eval(e) in OPCODE_NAMES: hex_string += format(eval(e), '02x') else: raise Exception return hex_string def get_element(self, i): """ Returns the ith element from the script. If -1 is passed as index, the last element is returned. :param i: The index of the selected element. :type i: int :return: The ith elements of the script. :rtype: str """ return Script.deserialize(self.content).split()[i] @abstractmethod def P2PK(self): pass @abstractmethod def P2PKH(self): pass @abstractmethod def P2MS(self): pass @abstractmethod def P2SH(self): pass class InputScript(Script): """ Defines an InputScript (ScriptSig) class that inherits from script. """ @classmethod def P2PK(cls, signature): """ Pay-to-PubKey template 'constructor'. Builds a P2PK InputScript from a given signature. :param signature: Transaction signature. 
        :type signature: hex str
        :return: A P2PK sScriptSig built using the given signature.
        :rtype: hex str
        """

        script = cls()
        # check_signature raises on malformed input, so content is only set
        # for well-formed signatures.
        if check_signature(signature):
            script.type = "P2PK"
            script.content = script.serialize("<" + signature + ">")

        return script

    @classmethod
    def P2PKH(cls, signature, pk):
        """ Pay-to-PubKeyHash template 'constructor'. Builds a P2PKH InputScript from a given signature and a
        public key.

        :param signature: Transaction signature.
        :type signature: hex str
        :param pk: Public key from the same key pair of the private key used to perform the signature.
        :type pk: hex str
        :return: A P2PKH ScriptSig built using the given signature and the public key.
        :rtype: hex str
        """

        script = cls()
        if check_signature(signature) and check_public_key(pk):
            script.type = "P2PKH"
            script.content = script.serialize(
                "<" + signature + "> <" + pk + ">")

        return script

    @classmethod
    def P2MS(cls, sigs):
        """ Pay-to-Multisig template 'constructor'. Builds a P2MS InputScript from a given list of signatures.

        :param sigs: List of transaction signatures.
        :type sigs: list
        :return: A P2MS ScriptSig built using the given signatures list.
        :rtype: hex str
        """

        script = cls()
        # OP_0 leads the script to work around the historical CHECKMULTISIG
        # extra-pop behavior.
        s = "OP_0"
        for sig in sigs:
            if check_signature(sig):
                s += " <" + sig + ">"

        script.type = "P2MS"
        script.content = script.serialize(s)

        return script

    @classmethod
    def P2SH(cls, data, s):
        """ Pay-to-ScriptHash template 'constructor'. Builds a P2SH InputScript from a given script.

        :param data: Input data that will be evaluated with the script content once its hash had been checked against
        the hash provided by the OutputScript.
        :type data: list
        :param s: Human readable script that hashes to the UTXO script hash that the transaction tries to redeem.
        :type s: hex str
        :return: A P2SH ScriptSig (RedeemScript) built using the given script.
        :rtype: hex str
        """

        script = cls()

        for d in data:
            if isinstance(d, str) and d.startswith("OP"):
                # If an OP_CODE is passed as data (such as OP_0 in multisig transactions), the element is encoded as is.
                script.content += d + " "
            else:
                # Otherwise, the element is encoded as data.
                script.content += "<" + str(d) + "> "

        # The serialized redeem script is appended as the final data push.
        script.type = "P2SH"
        script.content = script.serialize(
            script.content + "<" + script.serialize(s) + ">")

        return script


class OutputScript(Script):
    """ Defines an OutputScript (ScriptPubKey) class that inherits from script.
    """

    @classmethod
    def P2PK(cls, pk):
        """ Pay-to-PubKey template 'constructor'. Builds a P2PK OutputScript from a given public key.

        :param pk: Public key to which the transaction output will be locked to.
        :type pk: hex str
        :return: A P2PK ScriptPubKey built using the given public key.
        :rtype: hex str
        """

        script = cls()
        if check_public_key(pk):
            script.type = "P2PK"
            script.content = script.serialize("<"+pk+"> OP_CHECKSIG")

        return script

    @classmethod
    def P2PKH(cls, data, network='test', hash160=False):
        """ Pay-to-PubKeyHash template 'constructor'. Builds a P2PKH OutputScript from a given Bitcoin address / hash160
        of a Bitcoin address and network.

        :param data: Bitcoin address or hash160 of a Bitcoin address to which the transaction output will be locked to.
        :type data: hex str
        :param network: Bitcoin network (either mainnet or testnet)
        :type network: hex str
        :param hash160: If set, the given data is the hash160 of a Bitcoin address, otherwise, it is a Bitcoin address.
        :type hash160: bool
        :return: A P2PKH ScriptPubKey built using the given bitcoin address and network.
        :rtype: hex str
        """

        if network in ['testnet', 'test', 'mainnet', 'main']:
            script = cls()
            # Addresses are reduced to their hash160; pre-hashed data is used
            # as provided. check_address raises on malformed addresses.
            if not hash160 and check_address(data, network):
                h160 = btc_addr_to_hash_160(data)
            else:
                h160 = data
            script.type = "P2PKH"
            script.content = script.serialize(
                "OP_DUP OP_HASH160 <" + h160 + "> OP_EQUALVERIFY OP_CHECKSIG")
            return script
        else:
            raise Exception("Unknown Bitcoin network.")

    @classmethod
    def P2MS(cls, m, n, pks):
        """ Pay-to-Multisig template 'constructor'. Builds a P2MS OutputScript from a given list of public keys, the
        total number of keys and a threshold.
        :param m: Threshold, minimum amount of signatures needed to redeem from the output.
        :type m: int
        :param n: Total number of provided public keys.
        :type n: int
        :param pks: List of n public keys from which the m-of-n multisig output will be created.
        :type pks: list
        :return: A m-of-n Pay-to-Multisig script created using the provided public keys.
        :rtype: hex str
        """

        script = cls()
        if n != len(pks):
            raise Exception("The provided number of keys does not match the expected one: " + str(len(pks)) + "!=" + str(n))
        elif m not in range(1, 15) or n not in range(1, 15):
            raise Exception("Multisig transactions must be 15-15 at max")
        else:
            s = "OP_" + str(m)
            for pk in pks:
                # check_public_key raises on malformed keys.
                if check_public_key(pk):
                    s += " <" + pk + ">"

        script.type = "P2MS"
        script.content = script.serialize(
            s + " OP_" + str(n) + " OP_CHECKMULTISIG")

        return script

    @classmethod
    def P2SH(cls, script_hash):
        """ Pay-to-ScriptHash template 'constructor'. Builds a P2SH OutputScript from a given script hash.

        :param script_hash: Script hash to which the output will be locked to.
        :type script_hash: hex str
        :return: A P2SH ScriptPubKey built using the given script hash.
        :rtype: hex str
        """

        script = cls()
        l = len(script_hash)

        # A hash160 is 20 bytes = 40 hex characters.
        if l != 40:
            raise Exception("Wrong RIPEMD-160 hash length: " + str(l))
        else:
            script.type = "P2SH"
            script.content = script.serialize(
                "OP_HASH160 <" + script_hash + "> OP_EQUAL")

        return script


class TX:
    """ Defines a class TX (transaction) that holds all the modifiable fields of a Bitcoin transaction, such as
    version, number of inputs, reference to previous transactions, input and output scripts, value, etc.
    """

    def __init__(self):
        # Scalar fields.
        self.version = None
        self.inputs = None        # number of inputs
        self.outputs = None       # number of outputs
        self.nLockTime = None
        # Per-input fields (parallel lists, one entry per input).
        self.prev_tx_id = []
        self.prev_out_index = []
        self.scriptSig = []
        self.scriptSig_len = []
        self.nSequence = []
        # Per-output fields (parallel lists, one entry per output).
        self.value = []
        self.scriptPubKey = []
        self.scriptPubKey_len = []
        # Parsing cursor into self.hex (used by parse_varint/parse_element).
        self.offset = 0
        self.hex = ""

    @classmethod
    def build_from_hex(cls, hex_tx):
        """ Alias of deserialize class method.
        :param hex_tx: Hexadecimal serialized transaction.
        :type hex_tx: hex str
        :return: The transaction build using the provided hex serialized transaction.
        :rtype: TX
        """

        return cls.deserialize(hex_tx)

    @classmethod
    def build_from_scripts(cls, prev_tx_id, prev_out_index, value, scriptSig, scriptPubKey, fees=None):
        """ Builds a transaction from already built Input and Output scripts. This builder should be used when building
        custom transaction (Non-standard).

        :param prev_tx_id: Previous transaction id.
        :type prev_tx_id: either str or list of str
        :param prev_out_index: Previous output index. Together with prev_tx_id represent the UTXOs the current
        transaction is aiming to redeem.
        :type prev_out_index: either str or list of str
        :param value: Value in Satoshis to be spent.
        :type value: either int or list of int
        :param scriptSig: Input script containing the restrictions that will lock the transaction.
        :type scriptSig: either InputScript or list of InputScript
        :param scriptPubKey: Output script containing the redeem fulfilment conditions.
        :type scriptPubKey: either OutputScript or list of OutputScript
        :param fees: Fees that will be applied to the transaction. If set, fees will be subtracted from the last
        output.
        :type fees: int
        :return: The transaction build using the provided scripts.
:rtype: TX """ tx = cls() # Normalize all parameters if isinstance(prev_tx_id, str): prev_tx_id = [prev_tx_id] if isinstance(prev_out_index, int): prev_out_index = [prev_out_index] if isinstance(value, int): value = [value] if isinstance(scriptSig, InputScript): scriptSig = [scriptSig] if isinstance(scriptPubKey, OutputScript): scriptPubKey = [scriptPubKey] if len(prev_tx_id) is not len(prev_out_index) or len(prev_tx_id) is not len(scriptSig): raise Exception( "The number ofs UTXOs to spend must match with the number os ScriptSigs to set.") elif len(scriptSig) == 0 or len(scriptPubKey) == 0: raise Exception("Scripts can't be empty") else: tx.version = 1 # INPUTS tx.inputs = len(prev_tx_id) tx.prev_tx_id = prev_tx_id tx.prev_out_index = prev_out_index for i in range(tx.inputs): # ScriptSig tx.scriptSig_len.append(len(scriptSig[i].content) / 2) tx.scriptSig.append(scriptSig[i]) tx.nSequence.append(pow(2, 32) - 1) # ffffffff # OUTPUTS tx.outputs = len(scriptPubKey) for i in range(tx.outputs): tx.value.append(value[i]) # ScriptPubKey tx.scriptPubKey_len.append(len(scriptPubKey[i].content) / 2) tx.scriptPubKey.append(scriptPubKey[i]) # Output script. # If fees have been set, subtract them from the final value. Otherwise, assume they have been already # subtracted when specifying the amounts. if fees: tx.value[-1] -= fees tx.nLockTime = 0 tx.hex = tx.serialize() return tx @classmethod def build_from_io(cls, prev_tx_id, prev_out_index, value, outputs, fees=None, network='test'): """ Builds a transaction from a collection of inputs and outputs, such as previous transactions references and output references (either public keys, Bitcoin addresses, list of public keys (for multisig transactions), etc). This builder leaves the transaction ready to sign, so its the one to be used in most cases (Standard transactions). outputs format: P2PKH -> Bitcoin address, or list of Bitcoin addresses. e.g: output = btc_addr or output = [btc_addr0, btc_addr1, ...] 
P2PK -> Serialized Public key, or list of serialized pubic keys. (use keys.serialize_pk) e.g: output = pk or output = [pk0, pk1, ...] P2MS -> List of int (m) and public keys, or list of lists of int (m_i) and public keys. m represent the m-of-n number of public keys needed to redeem the transaction. e.g: output = [n, pk0, pk1, ...] or output = [[n_0, pk0_0, pk0_1, ...], [n_1, pk1_0, pk1_1, ...], ...] P2SH -> script hash (hash160 str hex) or list of hash 160s. e.g: output = da1745e9b549bd0bfa1a569971c77eba30cd5a4b or output = [da1745e9b549bd0bfa1a569971c77eba30cd5a4b, ...] :param prev_tx_id: Previous transaction id. :type prev_tx_id: either str or list of str :param prev_out_index: Previous output index. Together with prev_tx_id represent the UTXOs the current transaction is aiming to redeem. :type prev_out_index: either str or list of str :param value: Value in Satoshis to be spent. :type value: either int or list of int :param outputs: Information to build the output of the transaction. :type outputs: See above outputs format. :param fees: Fees that will be applied to the transaction. If set, fees will be subtracted from the last output. :type fees: int :param network: Network into which the transaction will be published (either mainnet or testnet). :type network: str :return: Transaction build with the input and output provided data. :rtype: TX """ ins = [] outs = [] # Normalize all parameters if isinstance(prev_tx_id, str): prev_tx_id = [prev_tx_id] if isinstance(prev_out_index, int): prev_out_index = [prev_out_index] if isinstance(value, int): value = [value] if isinstance(outputs, str) or (isinstance(outputs, list) and isinstance(outputs[0], int)): outputs = [outputs] # If fees have been set, subtract them from the final value. Otherwise, assume they have been already # subtracted when specifying the amounts. if fees: value[-1] -= fees if len(prev_tx_id) != len(prev_out_index): raise Exception("Previous transaction id and index number of elements must match. 
" + str(len(prev_tx_id)) + "!= " + str(len(prev_out_index))) elif len(value) != len(outputs): raise Exception( "Each output must have set a Satoshi amount. Use 0 if no value is going to be transferred.") for o in outputs: # Multisig outputs are passes ad an integer m representing the m-of-n transaction, amb m public keys. if isinstance(o, list) and o[0] in range(1, 15): pks = [is_public_key(pk) for pk in o[1:]] if all(pks): oscript = OutputScript.P2MS(o[0], len(o) - 1, o[1:]) else: raise Exception("Bad output") elif is_public_key(o): oscript = OutputScript.P2PK(o) elif is_btc_addr(o, network): oscript = OutputScript.P2PKH(o) elif is_script(o): oscript = OutputScript.P2SH(o) else: raise Exception("Bad output") outs.append(deepcopy(oscript)) for _ in range(len(prev_tx_id)): # Temporarily set IS content to 0, since data will be signed afterwards. iscript = InputScript() ins.append(iscript) # Once all inputs and outputs has been formatted as scripts, we could construct the transaction with the proper # builder. tx = cls.build_from_scripts( prev_tx_id, prev_out_index, value, ins, outs) return tx @classmethod def deserialize(cls, hex_tx): """ Builds a transaction object from the hexadecimal serialization format of a transaction that could be obtained, for example, from a blockexplorer. :param hex_tx: Hexadecimal serialized transaction. :type hex_tx: hex str :return: The transaction build using the provided hex serialized transaction. 
        :rtype: TX
        """

        tx = cls()
        tx.hex = hex_tx

        # 4-byte version (stored little endian).
        tx.version = int(change_endianness(parse_element(tx, 4)), 16)

        # INPUTS
        tx.inputs = int(parse_varint(tx), 16)

        for i in range(tx.inputs):
            tx.prev_tx_id.append(change_endianness(parse_element(tx, 32)))
            tx.prev_out_index.append(
                int(change_endianness(parse_element(tx, 4)), 16))
            # ScriptSig
            tx.scriptSig_len.append(int(parse_varint(tx), 16))
            tx.scriptSig.append(InputScript.from_hex(
                parse_element(tx, tx.scriptSig_len[i])))
            tx.nSequence.append(int(parse_element(tx, 4), 16))

        # OUTPUTS
        tx.outputs = int(parse_varint(tx), 16)

        for i in range(tx.outputs):
            tx.value.append(int(change_endianness(parse_element(tx, 8)), 16))
            # ScriptPubKey
            tx.scriptPubKey_len.append(int(parse_varint(tx), 16))
            tx.scriptPubKey.append(OutputScript.from_hex(
                parse_element(tx, tx.scriptPubKey_len[i])))

        tx.nLockTime = int(parse_element(tx, 4), 16)

        # The whole hex string must have been consumed, otherwise the input
        # was not a well-formed serialized transaction.
        if tx.offset != len(tx.hex):
            raise Exception("There is some error in the serialized transaction passed as input. Transaction can't"
                            " be built")
        else:
            tx.offset = 0

        return tx

    def serialize(self, rtype=hex):
        """ Serialize all the transaction fields arranged in the proper order, resulting in a hexadecimal string
        ready to be broadcast to the network.

        :param self: self
        :type self: TX
        :param rtype: Whether the serialized transaction is returned as a hex str or a byte array.
        :type rtype: hex or bool
        :return: Serialized transaction representation (hexadecimal or bin depending on rtype parameter).
        :rtype: hex str / bin
        """

        if rtype not in [hex, bin]:
            raise Exception(
                "Invalid return type (rtype). It should be either hex or bin.")

        # 4-byte version number (LE).
        serialized_tx = change_endianness(int2bytes(self.version, 4))

        # INPUTS
        serialized_tx += encode_varint(self.inputs)  # Varint number of inputs.

        for i in range(self.inputs):
            # 32-byte hash of the previous transaction (LE).
serialized_tx += change_endianness(self.prev_tx_id[i]) # 4-byte output index (LE) serialized_tx += change_endianness( int2bytes(self.prev_out_index[i], 4)) # Varint input script length. serialized_tx += encode_varint(len(self.scriptSig[i].content) / 2) # ScriptSig serialized_tx += self.scriptSig[i].content # Input script. # 4-byte sequence number. serialized_tx += int2bytes(self.nSequence[i], 4) # OUTPUTS # Varint number of outputs. serialized_tx += encode_varint(self.outputs) if self.outputs != 0: for i in range(self.outputs): # 8-byte field Satoshi value (LE) serialized_tx += change_endianness(int2bytes(self.value[i], 8)) # ScriptPubKey # Varint Output script length. serialized_tx += encode_varint( len(self.scriptPubKey[i].content) / 2) serialized_tx += self.scriptPubKey[i].content # Output script. serialized_tx += int2bytes(self.nLockTime, 4) # 4-byte lock time field # If return type has been set to binary, the serialized transaction is converted. if rtype is bin: serialized_tx = unhexlify(serialized_tx) return serialized_tx def get_txid(self, rtype=hex, endianness="LE"): """ Computes the transaction id (i.e: transaction hash for non-segwit txs). :param rtype: Defines the type of return, either hex str or bytes. :type rtype: str or bin :param endianness: Whether the id is returned in BE (Big endian) or LE (Little Endian) (default one) :type endianness: str :return: The hash of the transaction (i.e: transaction id) :rtype: hex str or bin, depending on rtype parameter. """ if rtype not in [hex, bin]: raise Exception( "Invalid return type (rtype). It should be either hex or bin.") if endianness not in ["BE", "LE"]: raise Exception( "Invalid endianness type. 
It should be either BE or LE.") if rtype is hex: tx_id = hexlify( sha256(sha256(self.serialize(rtype=bin)).digest()).digest()) if endianness == "BE": tx_id = change_endianness(tx_id) else: tx_id = sha256(sha256(self.serialize(rtype=bin)).digest()).digest() if endianness == "BE": tx_id = unhexlify(change_endianness(hexlify(tx_id))) return tx_id def sign(self, sk, index, hashflag=SIGHASH_ALL, compressed=True, orphan=False, deterministic=True, network='test'): """ Signs a transaction using the provided private key(s), index(es) and hash type. If more than one key and index is provides, key i will sign the ith input of the transaction. :param sk: Private key(s) used to sign the ith transaction input (defined by index). :type sk: SigningKey or list of SigningKey. :param index: Index(es) to be signed by the provided key(s). :type index: int or list of int :param hashflag: Hash type to be used. It will define what signature format will the unsigned transaction have. :type hashflag: int :param compressed: Indicates if the public key that goes along with the signature will be compressed or not. :type compressed: bool :param orphan: Whether the inputs to be signed are orphan or not. Orphan inputs are those who are trying to redeem from a utxo that has not been included in the blockchain or has not been seen by other nodes. Orphan inputs must provide a dict with the index of the input and an OutputScript that matches the utxo to be redeemed. e.g: orphan_input = dict({0: OutputScript.P2PKH(btc_addr)) :type orphan: dict(index, InputScript) :param deterministic: Whether the signature is performed using a deterministic k or not. Set by default. :type deterministic: bool :param network: Network from which the previous ScripPubKey will be queried (either main or test). :type network: str :return: Transaction signature. :rtype: str """ # Normalize all parameters if isinstance(sk, list) and isinstance(index, int): # In case a list for multisig is received as only input. 
sk = [sk] if isinstance(sk, SigningKey): sk = [sk] if isinstance(index, int): index = [index] for i in range(len(sk)): # If the input to be signed is orphan, the OutputScript of the UTXO to be redeemed will be passed to # the signature_format function, otherwise False is passed and the UTXO will be requested afterwards. o = orphan if not orphan else orphan.get(i) # The unsigned transaction is formatted depending on the input that is going to be signed. For input i, # the ScriptSig[i] will be set to the scriptPubKey of the UTXO that input i tries to redeem, while all # the other inputs will be set blank. unsigned_tx = self.signature_format(index[i], hashflag, o, network) # Then, depending on the format how the private keys have been passed to the signing function # and the content of the ScripSig field, a different final scriptSig will be created. if isinstance(sk[i], list) and unsigned_tx.scriptSig[index[i]].type == "P2MS": sigs = [] for k in sk[i]: sigs.append(ecdsa_tx_sign( unsigned_tx.serialize(), k, hashflag, deterministic)) iscript = InputScript.P2MS(sigs) elif isinstance(sk[i], SigningKey) and unsigned_tx.scriptSig[index[i]].type == "P2PK": s = ecdsa_tx_sign(unsigned_tx.serialize(), sk[i], hashflag, deterministic) iscript = InputScript.P2PK(s) elif isinstance(sk[i], SigningKey) and unsigned_tx.scriptSig[index[i]].type == "P2PKH": s = ecdsa_tx_sign(unsigned_tx.serialize(), sk[i], hashflag, deterministic) pk = serialize_pk(sk[i].get_verifying_key(), compressed) iscript = InputScript.P2PKH(s, pk) elif unsigned_tx.scriptSig[index[i]].type == "unknown": raise Exception( "Unknown previous transaction output script type. 
Can't sign the transaction.") else: raise Exception("Can't sign input " + str(i) + " with the provided data.") # Finally, temporal scripts are stored as final and the length of the script is computed self.scriptSig[i] = iscript self.scriptSig_len[i] = len(iscript.content) / 2 self.hex = self.serialize() def signature_format(self, index, hashflag=SIGHASH_ALL, orphan=False, network='test'): """ Builds the signature format an unsigned transaction has to follow in order to be signed. Basically empties every InputScript field but the one to be signed, identified by index, that will be filled with the OutputScript from the UTXO that will be redeemed. The format of the OutputScripts will depend on the hashflag: - SIGHASH_ALL leaves OutputScript unchanged. - SIGHASH_SINGLE should sign each input with the output of the same index (not implemented yet). - SIGHASH_NONE empies all the outputs. - SIGHASH_ANYONECANPAY not sure about what should do (obviously not implemented yet). :param index: The index of the input to be signed. :type index: int :param hashflag: Hash type to be used, see above description for further information. :type hashflag: int :param orphan: Whether the input is orphan or not. Orphan inputs must provide an OutputScript that matches the utxo to be redeemed. :type orphan: OutputScript :param network: Network into which the transaction will be published (either mainnet or testnet). :type network: str :return: Transaction properly formatted to be signed. :rtype TX """ tx = deepcopy(self) for i in range(tx.inputs): if i is index: if not orphan: script, t = get_prev_ScriptPubKey( tx.prev_tx_id[i], tx.prev_out_index[i], network) # Once we get the previous UTXO script, the inputScript is temporarily set to it in order to sign # the transaction. tx.scriptSig[i] = InputScript.from_hex(script) tx.scriptSig[i].type = t else: # If input to be signed is orphan, the orphan InputScript is used when signing the transaction. 
tx.scriptSig[i] = orphan tx.scriptSig_len[i] = len(tx.scriptSig[i].content) / 2 elif tx.scriptSig[i].content != "": # All other scriptSig fields are emptied and their length is set to 0. tx.scriptSig[i] = InputScript() tx.scriptSig_len[i] = len(tx.scriptSig[i].content) / 2 if hashflag is SIGHASH_SINGLE: # First we checks if the input that we are trying to sign has a corresponding output, if so, the execution # can continue. Otherwise, we abort the signature process since it could lead to a irreversible lose of # funds due to a bug in SIGHASH_SINGLE. # https://bitcointalk.org/index.php?topic=260595 if index >= tx.outputs: raise Exception("You are trying to use SIGHASH_SINGLE to sign an input that does not have a " "corresponding output (" + str(index) + "). This could lead to a irreversible lose " "of funds. Signature process aborted.") # Otherwise, all outputs will set to empty scripts but the ith one (identified by index), # since SIGHASH_SINGLE should only sign the ith input with the ith output. else: # How to properly deal with SIGHASH_SINGLE signature format extracted from: # https://github.com/bitcoin/bitcoin/blob/3192e5278a/test/functional/test_framework/script.py#L869 # First we backup the output that we will sign, t_script = tx.scriptPubKey[index] t_size = tx.scriptPubKey_len[index] t_value = tx.value[index] # Then, we delete every single output. tx.scriptPubKey = [] tx.scriptPubKey_len = [] tx.value = [] for o in range(index): # Once the all outputs have been deleted, we create empty outputs for every single index before # the one that will be signed. Furthermore, the value of the output if set to maximum (2^64-1) tx.scriptPubKey.append(OutputScript()) tx.scriptPubKey_len.append( len(tx.scriptPubKey[o].content) / 2) tx.value.append(pow(2, 64) - 1) # Once we reach the index of the output that will be signed, we restore it with the one that we backed # up before. 
tx.scriptPubKey.append(t_script) tx.scriptPubKey_len.append(t_size) tx.value.append(t_value) # Finally, we recalculate the number of outputs for the signature format. # Notice that each signature format will have index number of outputs! Otherwise it will be invalid. tx.outputs = len(tx.scriptPubKey) elif hashflag is SIGHASH_NONE: # Empty all the scriptPubKeys and set the length and the output counter to 0. tx.outputs = 0 tx.scriptPubKey = OutputScript() tx.scriptPubKey_len = len(tx.scriptPubKey.content) / 2 elif hashflag is SIGHASH_ANYONECANPAY: # ToDo: Implement SIGHASH_ANYONECANPAY pass if hashflag in [SIGHASH_SINGLE, SIGHASH_NONE]: # All the nSequence from inputs except for the current one (index) is set to 0. # https://github.com/bitcoin/bitcoin/blob/3192e5278a/test/functional/test_framework/script.py#L880 for i in range(tx.inputs): if i is not index: tx.nSequence[i] = 0 return tx def display(self): """ Displays all the information related to the transaction object, properly split and arranged. Data between parenthesis corresponds to the data encoded following the serialized transaction format. (replicates the same encoding being done in serialize method) :param self: self :type self: TX :return: None. 
:rtype: None """ print("version: " + str(self.version) + " (" + change_endianness(int2bytes(self.version, 4)) + ")") print("number of inputs: " + str(self.inputs) + " (" + encode_varint(self.inputs) + ")") for i in range(self.inputs): print("input " + str(i)) print("\t previous txid (little endian): " + self.prev_tx_id[i] + " (" + change_endianness(self.prev_tx_id[i]) + ")") print("\t previous tx output (little endian): " + str(self.prev_out_index[i]) + " (" + change_endianness(int2bytes(self.prev_out_index[i], 4)) + ")") print("\t input script (scriptSig) length: " + str(self.scriptSig_len[i]) + " (" + encode_varint((self.scriptSig_len[i])) + ")") print("\t input script (scriptSig): " + self.scriptSig[i].content) print("\t decoded scriptSig: " + Script.deserialize(self.scriptSig[i].content)) if self.scriptSig[i].type == "P2SH": print("\t \t decoded redeemScript: " + InputScript.deserialize(self.scriptSig[i].get_element(-1)[1:-1])) print("\t nSequence: " + str(self.nSequence[i]) + " (" + int2bytes(self.nSequence[i], 4) + ")") print("number of outputs: " + str(self.outputs) + " (" + encode_varint(self.outputs) + ")") for i in range(self.outputs): print("output " + str(i)) print("\t Satoshis to be spent (little endian): " + str(self.value[i]) + " (" + change_endianness(int2bytes(self.value[i], 8)) + ")") print("\t output script (scriptPubKey) length: " + str(self.scriptPubKey_len[i]) + " (" + encode_varint(self.scriptPubKey_len[i]) + ")") print("\t output script (scriptPubKey): " + self.scriptPubKey[i].content) print("\t decoded scriptPubKey: " + Script.deserialize(self.scriptPubKey[i].content)) print("nLockTime: " + str(self.nLockTime) + " (" + int2bytes(self.nLockTime, 4) + ")") ################################################# # Hex transaction analysis # ################################################# # --------------------------------------------------------------------------------------------------------------------- # The following piece of code parses a 
serialized transaction (hex encoded) and displays all the information related # to it. # - Leftmost displayed transaction shows data as should be interpreted (human-readable), while rightmost # (surrounded by parenthesis) shows it as it is in the serialize transaction (can be used to identify it inside the # transaction) # - You should change the hex_tx for the one you'd like to deserialize. Serialized transaction can be obtain from block # explorers such as blockcypher.com or blockchain.info, or by building a transaction using some of the library tools. # --------------------------------------------------------------------------------------------------------------------- # First a transaction object is created (through the deserialize constructor) by deserializing the hex transaction we # have selected. if __name__ == "__main__": hex_tx = "01000000013ca58d2f6fac36602d831ee0cf2bc80031c7472e80a322b57f614c5ce9142b71000000006b483045022100f0331d85cb7f7ec1bedc41f50c695d654489458e88aec0076fbad5d8aeda1673022009e8ca2dda1d6a16bfd7133b0008720145dacccb35c0d5c9fc567e52f26ca5f7012103a164209a7c23227fcd6a71c51efc5b6eb25407f4faf06890f57908425255e42bffffffff0241a20000000000001976a914e44839239ab36f5bc67b2079de00ecf587233ebe88ac74630000000000001976a914dc7016484646168d99e49f907c86c271299441c088ac00000000" tx = TX.deserialize(hex_tx) # Then, the transaction can be displayed using the display method to analyze how it's been constructed. tx.display()
import unittest
import mock

from tethys_portal.views.developer import is_staff, home


class TethysPortalDeveloperTests(unittest.TestCase):
    """Unit tests for the developer home view helpers."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_is_staff(self):
        """is_staff() should simply proxy the user's ``is_staff`` attribute."""
        mock_user = mock.MagicMock()
        mock_user.is_staff = 'foo'
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual('foo', is_staff(mock_user))

    @mock.patch('tethys_portal.views.developer.render')
    def test_home(self, mock_render):
        """home() should render the developer home template with an empty context."""
        mock_request = mock.MagicMock()
        context = {}
        mock_render.return_value = 'foo'
        self.assertEqual('foo', home(mock_request))
        mock_render.assert_called_once_with(mock_request,
                                            'tethys_portal/developer/home.html',
                                            context)
# DESAFIO 005: read an integer and print its successor and predecessor.
print('\n===== DESAFIO 005 ====\n')
print('Faça um programa que leia um número inteiro e mostre na tela o seu sucessor e o seu antecessor.')
n = int(input('Digite um número: '))
print('O sucessor de {} é {} \ne o antecessor de {} é {}'.format(n, n+1, n, n-1))

# DESAFIO 006: read a number and print its double, triple and square root.
print('\n===== DESAFIO 006 ====\n')
print('Crie um algoritmo que leia um número e mostre o seu dobro, triplo e raiz quadrada')
n= int(input('Digite um número: '))
# n**(1/2) computes the square root by exponentiation.
print('O seu dobro é igual a {}, o triplo igual a {} e sua raiz quadrada igual a {}'.format(n+n, n+n+n, n**(1/2)))

# DESAFIO 007: read two grades and print the average with 2 decimal places.
print('\n===== DESAFIO 007 ====\n')
print('Desenvolva um algoritmo que leia as duas notas de um aluno e calcule a sau média')
nota1 = float(input('Nota 1: '))
nota2 = float(input('Nota 2: '))
media = (nota1 + nota2) / 2
print('A sua média é {:.2f}'.format(media))

# DESAFIO 008: read a length in meters and show it in centimeters/millimeters.
print('\n===== DESAFIO 008 ====\n')
print('Escreva um programa que leia um valor em metros e o exiba convertido em centimetros e milimetros')
# decimeter: 10 decimeters correspond to 1 meter.
# centimeter: 100 centimeters correspond to 1 meter.
# millimeter: 1000 millimeters correspond to 1 meter.
# DESAFIO 008 (continued): read the value and print the conversions.
# BUG FIX: the original formatted the raw value with '{:0<2}'/'{:0<3}'
# zero-fill specs, which only pads digits (e.g. 3 m -> "30" cm) instead of
# converting units; multiply by 100 / 1000 instead.
v = int(input('Digite um valor em metros: '))
print('{} m metros em centimetros é igual a {} cm e em milimetros é {} mn'.format(v, v * 100, v * 1000))

# DESAFIO 009: print the multiplication table (1..10) of a number.
print('\n===== DESAFIO 009 ====\n')
print('Faça um programa que leia um número inteiro e mostre na tela sua tabuada')
n = int(input('Digite um número: '))
print('{} * 1 = {}'.format(n, n*1))
print('{} * 2 = {}'.format(n, n*2))
print('{} * 3 = {}'.format(n, n*3))
print('{} * 4 = {}'.format(n, n*4))
print('{} * 5 = {}'.format(n, n*5))
print('{} * 6 = {}'.format(n, n*6))
print('{} * 7 = {}'.format(n, n*7))
print('{} * 8 = {}'.format(n, n*8))
print('{} * 9 = {}'.format(n, n*9))
print('{} * 10 = {}'.format(n, n*10))

# DESAFIO 010: convert BRL in the wallet to USD at the fixed rate 3.27.
print('\n===== DESAFIO 010 ====\n')
print('Crie um programa em que leia quanto uma pessoa tem na carteira e mostre quantos Dólares ela pode comprar\nConsiderando US$1,00 = R$3,27')
dol = float(input('Digite quantos reais você possui na carteira: '))
print('Você consegue comprar US${} Dólares'.format(dol/3.27))

# DESAFIO 011: wall area and paint needed (1 liter covers 2 m²).
print('\n===== DESAFIO 011 ====\n')
print('Faça um programa que leia a largura e a altura de uma parede em metros, calcule a sua área e quantidade necessária para pinta-la\n'
      'sabendo que cada litro de tinta pinta uma area de 2m²')
lg = float(input('Largura: '))
al = float(input('Altura: '))
a = lg * al
c = a / 2
print('Em uma parede de {:.1f}m largura x {:.1f}m altura, sua área total é de {:.1f}m e seu consuo sera de {:.1f} litros de tinta'.format(lg, al, a, c))

# DESAFIO 012: apply a 5% discount to a product price.
print('\n===== DESAFIO 012 ====\n')
print('Faça um algoritmo que leia o preço de um produto e mostre seu novo preço com 5% de desconto.')
pc = float(input('Digite o preço do produto sem deconto: '))
dc = pc * 0.05
dct = pc - dc
print('O valor do produto sem desconto é R${:.2f}\nE com o desconto de 5% é igual a R${:.2f}'.format(pc, dct))

# DESAFIO 013: apply a 15% raise to a salary.
print('\n===== DESAFIO 013 ====\n')
print('Faça um algoritmo que leia o sálario de um funcionario e mostre seu novo sálario, com 15% de aumento')
sal = float(input('Digite o sálario R$'))
aum = sal * 0.15
# NOTE: this assignment was split across chunk-line boundaries in the
# original dump; rejoined here.
saln = sal + aum
print('O o novo sálario com aumento de 15% é de R${}'.format(saln))
import requests
from requests.auth import HTTPBasicAuth
import time

# MySportsFeeds API endpoint/credentials and output directory on disk.
url = "https://api.mysportsfeeds.com/v2.1/pull/nhl/2021-2022-regular/games.csv"
token = "4e92f126-d598-4577-98e3-bb0674"
password = "MYSPORTSFEEDS"
top_directory = r"C:\Users\John Lang\Documents\Marauder\NHL\core"
years = ["2017","2018","2019","2020","2021"]
season_playoff = ["regular","playoff"]

print("_________________________")
print("PLAYER STAT PROJECTSIONS ")
print("_________________________")

# Counts successful downloads so the loop can pause periodically and avoid
# hammering the API rate limit.
counter = 0
for year in years:
    for condition in season_playoff:
        try:
            print(year)
            print(condition)
            player_stat_urls = f"https://api.mysportsfeeds.com/v2.1/pull/nhl/{year}-{condition}/player_stats_totals_projections.csv"
            player_stat_r = requests.get(player_stat_urls, auth=HTTPBasicAuth(token, password))
            print(player_stat_r.status_code)

            out_path = top_directory + f"/{year[0:4]}/{year[0:4]}_playerprojections_{condition}.csv"
            print("writing file to " + out_path)
            # BUG FIX: the original opened the file without ever closing it,
            # leaking one handle per iteration; a context manager guarantees
            # the handle is closed even if the write fails.
            with open(out_path, "w+", encoding="utf-8") as file:
                file.write(player_stat_r.text)

            counter = counter + 1
            print("-------------------------------------")
            print(f"BOOM SAVED FOR {year} AND {condition}")
            print("-------------------------------------")
            while (counter > 1):
                print("Cooling down request sends... Go get money:")
                time.sleep(30)
                counter = 0
                print("COUNTER RESET, loop restarting")
        except (requests.RequestException, OSError) as err:
            # BUG FIX: the original bare `except:` swallowed every exception
            # (including KeyboardInterrupt) and then referenced
            # player_stat_r.status_code, which is unbound when requests.get
            # itself raised. Catch only network/filesystem errors and report
            # the exception instead.
            print("-------------------------------------")
            print("bro something fucked up: " + str(err))
            print("-------------------------------------")
import cv2 as cv
import numpy as np

# Load the source image from disk.
source = cv.imread("nancy.jpg")

# Resize to a fixed 400x400; INTER_AREA interpolation suits downscaling.
shrunk = cv.resize(source, (400, 400), interpolation=cv.INTER_AREA)
for shape in (source.shape, shrunk.shape):
    print(shape)
cv.imshow("resize", shrunk)

# Crop a region of interest with NumPy slicing: rows 150-199, cols 200-299.
roi = source[150:200, 200:300]
cv.imshow("Crop", roi)

# Block until a key is pressed, then tear down all windows.
cv.waitKey(0)
cv.destroyAllWindows()
from requirements_detector.detect import find_requirements
#!/home/blur/Desktop/projekty/subjectio/bot/bot_backend/.venv/bin/python # import sys # sys.path.append(".") from botski.importy import * from botski.config import * from botski.models import * from botski.db import * from botski.tweepy_init import * from botski.helpers import * locale.setlocale(locale.LC_TIME, 'pl_PL.utf8') app = Flask(__name__) app.secret_key = FLASK_SECRET_KEY app.debug = True # from flask_debugtoolbar import DebugToolbarExtension # toolbar = DebugToolbarExtension(app) def check_is_api_blocked(func=None): @functools.wraps(func) def inner(*args, **kwargs): if Settings.is_api_blocked(): debug_bot(f"[API] IN {func.__name__}() API IS BLOCKED BY THE USER") return None return func(*args, **kwargs) return inner @check_is_api_blocked def send_comment(task:JobQueue, REALLY=False): tweet_id = task.tweet.tweet_id comment_text = task.payload.twitter_comment photo_path = task.payload.uploaded_image debug_bot("[API] Comment will be sent for tweet ID:", tweet_id, "WITH TEXT:", comment_text, "WITH PHOTO:", photo_path) if not REALLY: return image = (photo_path) media_ids = [api.media_upload(image).media_id_string] tweet = None try: tweet = api.update_status(status = comment_text, in_reply_to_status_id = tweet_id, auto_populate_reply_metadata=True, media_ids = media_ids) debug_bot("[API] Comment SENT!") except tweepy.error.TweepError as e: debug_bot(f"[ERROR] {e}") # debug_bot(f"[API_CODE] {e.api_code}") handle_sending_error(task, e.api_code) return tweet def handle_sending_error(task:JobQueue, tweepy_api_code): if tweepy_api_code == 433: # [{'code': 433, 'message': 'The original Tweet author restricted who can reply to this Tweet.'}] mark_task_as_done(task) if tweepy_api_code == 385: # [{'code': 385, 'message': 'You attempted to reply to a Tweet that is deleted or not visible to you.'}] mark_task_as_done(task) # [{'code': 186, 'message': 'Tweet needs to be a bit shorter.'}] # [{'code': 326, 'message': 'To protect our users from spam and other malicious activity, 
this account is temporarily locked. Please log in to https://twitter.com to unlock your account.'}] def build_query_for_payload(payload:Payloads) -> str: query = payload["twitter_query"] + " " + ADDITIONAL_QUERY_SUFFIX return query def get_latest_tweet_for_payload(payload:Payloads): newest_tweet = JobQueue.get_newest_tweet(payload) since_id = None if hasattr(newest_tweet, 'tweet'): since_id = newest_tweet.tweet.tweet_id debug_bot(f'[INFO] Latest found tweet for payload -> since_id={since_id}') return since_id @check_is_api_blocked def find_tweets_for_payload(payload:Payloads) -> tweepy.cursor.ItemIterator: since_id = get_latest_tweet_for_payload(payload) query = build_query_for_payload(payload) debug_bot("[API] Looking tweets for query:", query) tweepy_cursor = tweepy.Cursor(api.search, since_id = since_id, result_type="recent", q=query, count=HOW_MANY_TWEETS_TO_FIND).items(HOW_MANY_TWEETS_TO_FIND) return tweepy_cursor def save_found_tweets_into_database(tweepy_cursor:tweepy.cursor.ItemIterator, payload:Payloads) -> list: try: # strange error from 12.10.2020.txt for tweet in tweepy_cursor: tweet_id = int(tweet.id_str) try: debug_bot(f"[DB] Add tweet with id: {tweet_id}") FoundTweets( tweet_id = int(tweet_id), tweet_user = str(tweet.user.screen_name), tweet_created = str(tweet.created_at), tweet_text = str(tweet.text)).save() except mongoengine.errors.NotUniqueError: debug_bot("[DB] DUPLICATED in FoundTweets!") try: debug_bot("[DB] Add to JobQueue tweet_id:", int(tweet_id)) payload = Payloads.objects(id=payload.id).first() tweet = FoundTweets.objects(tweet_id=tweet.id_str).first() JobQueue(payload = payload, tweet = tweet).save() except mongoengine.errors.NotUniqueError: debug_bot("[DB] DUPLICATED in JobQueue!") except tweepy.error.TweepError as e: debug_bot(f"[ERROR] {e}") return True def mark_task_as_done(task): JobQueue.objects(tweet=task.tweet).update_one(job_done=True) debug_bot("[QUEUE] From now this task is marked as done.") def process_queue_and_send(queue): 
tweet_id = None sent_tweet = None task_number = 0 for task in queue: debug_bot("\n[PROCESS_QUEUE] processing JobQueue id:", task.id) if not hasattr(task, 'payload'): debug_bot(f"[PROCESS_QUEUE] skip task: {task.id} because his payload has been deleted in the meantime") continue if task.payload.active == False: debug_bot("[PROCESS_QUEUE] delete task with tweet_id:", task.tweet.tweet_id, "because his payload has been turned off") # DODATKOWO TUTAJ POTRZEBNY JEST TRY JobQueue.objects(tweet=task.tweet).delete() FoundTweets.objects(tweet_id=int(task.tweet.tweet_id)).delete() continue if task_number > 0: sleep_time = Time.estimateSleepTimeForSendComment() sleep_bot(sleep_time, more_info="before I can send next tweet") task_number += 1 debug_bot("[PROCESS_QUEUE] Preparing to send comment Nº", task_number, "/", len(queue)) try: sent_tweet = send_comment(task, REALLY=True) # except tweepy.error.TweepError as e: # if e.api_code == "433": # TODO# TODO # mark_task_as_done(task) # TODO except Exception as e: debug_bot(f"[ERROR] {e}") raise if sent_tweet: sent_at = datetime.datetime.now() new_tweet_id = sent_tweet.id_str JobQueue.objects(tweet=task.tweet).update_one(tweet_sent_id=new_tweet_id) debug_bot("[QUEUE] New ID for sent tweet:", new_tweet_id) JobQueue.objects(tweet=task.tweet).update_one(payload_sent_at=sent_at) debug_bot("[QUEUE] Payload sent at:", sent_at) mark_task_as_done(task) # break def main(loop_nr, uptime): now = datetime.datetime.now() queue = JobQueue.get_elements(job_done=False) active_payloads:QuerySet = Payloads.get_elements(activeOnly=True) debug_bot(f"\n+--------------------------------------------------------------------") debug_bot(f"| Bot iteration nr {loop_nr}, queue size: {len(queue)}, active payloads: {len(active_payloads)}") debug_bot(f"| init: {now}, uptime: {now - uptime} ") debug_bot(f"+--------------------------------------------------------------------\n") if queue: process_queue_and_send(queue) else: debug_bot("[QUEUE] There is nothing to do 
at the moment") for payload in active_payloads: found_tweets_for_payload = find_tweets_for_payload(payload) if found_tweets_for_payload: save_found_tweets_into_database(found_tweets_for_payload, payload) sleep_time = Time.estimateSleepTime() sleep_bot(sleep_time) def get_payloads(activeOnly=True) -> list: return Payloads.objects(active=activeOnly) def get_actual_api_limits() -> dict: return api.rate_limit_status() if __name__ == "__main__": loop_nr = 0 uptime = datetime.datetime.now() while True: loop_nr += 1 main(loop_nr, uptime)
# -*- coding: utf-8 -*- # # H3C Technologies Co., Limited Copyright 2003-2015, All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import sqlalchemy as sa from neutron import context as nctx import neutron.db.api as db from neutron.db import db_base_plugin_v2 from neutron.db import model_base from neutron.db import models_v2 from oslo_log import log as logging LOG = logging.getLogger(__name__) VLAN_SEGMENTATION = 'vlan' UUID_LEN = 36 STR_LEN = 255 SEGTYPE_LEN = 12 class HPRelatedNetworks(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): """ Representation for table comware_related_nets A network id corresponding a segmentation ID. """ __tablename__ = 'hp_related_nets' network_id = sa.Column(sa.String(UUID_LEN)) segmentation_id = sa.Column(sa.Integer) segmentation_type = sa.Column(sa.String(SEGTYPE_LEN)) def hp_network_representation(self, segmentation_type): return {u'network_id': self.network_id, u'segmentation_id': self.segmentation_id, u'segmentation_type': segmentation_type} class HPRelatedVms(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): """ Representation for table comware_related_vms This table stores all the VM informations. 
""" __tablename__ = 'hp_related_vms' device_id = sa.Column(sa.String(STR_LEN)) host_id = sa.Column(sa.String(STR_LEN)) port_id = sa.Column(sa.String(UUID_LEN)) network_id = sa.Column(sa.String(UUID_LEN)) def hp_vm_representation(self): return {u'device_id': self.device_id, u'host': self.host_id, u'ports': {self.port_id: [{u'port_id': self.port_id, u'network_id': self.network_id}]}} def hp_port_representation(self): return {u'device_id': self.device_id, u'host': self.host_id, u'port_id': self.port_id, u'network_id': self.network_id} def get_network_count(): session = db.get_session() with session.begin(): q = session.query(HPRelatedNetworks) nets_cnt = int(q.count()) return nets_cnt def create_network(tenant_id, network_id, segmentation_id, segment_type): """ Store a network relationship in db. """ session = db.get_session() with session.begin(): network = HPRelatedNetworks(tenant_id=tenant_id, network_id=network_id, segmentation_id=segmentation_id, segmentation_type=segment_type) session.add(network) def delete_network(tenant_id, network_id): """ Remove a network relationship from comware db. """ session = db.get_session() with session.begin(): (session.query(HPRelatedNetworks). filter_by(network_id=network_id).delete()) def create_vm(device_id, host_id, port_id, network_id, tenant_id): """ Relate a vm with comware. """ session = db.get_session() with session.begin(): vm = HPRelatedVms(device_id=device_id, host_id=host_id, port_id=port_id, network_id=network_id, tenant_id=tenant_id) session.add(vm) def delete_vm(device_id, host_id, port_id, network_id, tenant_id): """Removes all relevant information about a VM from repository. """ LOG.info(_("break vm begin")) session = db.get_session() with session.begin(): (session.query(HPRelatedVms). 
filter_by(device_id=device_id, host_id=host_id, port_id=port_id, tenant_id=tenant_id, network_id=network_id).delete()) LOG.info(_("Break vm end")) def get_segmentation_id(tenant_id, network_id): session = db.get_session() with session.begin(): net = (session.query(HPRelatedNetworks). filter_by(tenant_id=tenant_id, network_id=network_id).first()) return net and net.segmentation_id or None def is_vm_created(device_id, host_id, port_id, network_id, tenant_id): """Checks if a VM is already known to comware. """ session = db.get_session() num_vm = 0 with session.begin(): num_vm = (session.query(HPRelatedVms). filter_by(tenant_id=tenant_id, device_id=device_id, port_id=port_id, network_id=network_id, host_id=host_id).count()) return num_vm > 0 def get_distinct_vms(): session = db.get_session() with session.begin(): vms = (session.query(HPRelatedVms.host_id, HPRelatedVms.network_id).distinct()) return vms return None def get_segment_id_by_net_id(net_id, net_type): session = db.get_session() with session.begin(): net = session.query(HPRelatedNetworks).\ filter_by(network_id=net_id, segmentation_type=net_type).first() if net is not None: return net.segmentation_id else: return None def is_network_created(tenant_id, network_id, seg_id=None): """Checks if a networks is already known to COMWARE.""" session = db.get_session() with session.begin(): if not seg_id: num_nets = (session.query(HPRelatedNetworks). filter_by(tenant_id=tenant_id, network_id=network_id).count()) else: num_nets = (session.query(HPRelatedNetworks). filter_by(tenant_id=tenant_id, network_id=network_id, segmentation_id=seg_id).count()) LOG.info(_("num_nets %s"), str(num_nets)) return num_nets > 0 def created_nets_count(tenant_id): """Returns number of networks for a given tenant. """ session = db.get_session() with session.begin(): return (session.query(HPRelatedNetworks). filter_by(tenant_id=tenant_id).count()) def get_vm_count(network_id, host_id): """ Return the number vm in the same network. 
""" session = db.get_session() with session.begin(): return (session.query(HPRelatedVms). filter_by(network_id=network_id, host_id=host_id).count()) def get_networks(): session = db.get_session() with session.begin(): model = HPRelatedNetworks all_nets = session.query(model) res = dict( (net.network_id, net.hp_network_representation( VLAN_SEGMENTATION)) for net in all_nets ) return res def get_vms(tenant_id): session = db.get_session() with session.begin(): model = HPRelatedVms none = None all_vms = (session.query(model). filter(model.tenant_id == tenant_id, model.host_id != none, model.device_id != none, model.network_id != none, model.port_id != none)) res = dict( (vm.device_id, vm.hp_vm_representation()) for vm in all_vms ) return res def get_vm_host(device_id, port_id, network_id, tenant_id): session = db.get_session() with session.begin(): qry = (session.query(HPRelatedVms). filter_by(tenant_id=tenant_id, device_id=device_id, port_id=port_id, network_id=network_id)) for one in qry: return one['host_id'] return None def get_host_list(network_id): host_list = [] session = db.get_session() with session.begin(): qry = (session.query(HPRelatedVms). filter_by(network_id=network_id)) for one in qry: host_list.append(one['host_id']) return host_list def get_ports(tenant_id): session = db.get_session() with session.begin(): model = HPRelatedVms none = None all_ports = (session.query(model). 
filter(model.tenant_id == tenant_id, model.host_id != none, model.device_id != none, model.network_id != none, model.port_id != none)) res = dict( (port.port_id, port.hp_port_representation()) for port in all_ports ) return res def get_host_vlan(): vms = get_distinct_vms() host_vlan = {} for vm in vms: seg_id = get_segment_id_by_net_id(vm.network_id, 'vlan') if seg_id is None: continue host_id = vm.host_id LOG.info(_("host %s seg_id %s"), host_id, str(seg_id)) if host_id in host_vlan: if seg_id not in host_vlan[host_id]: host_vlan[host_id].append(seg_id) else: host_vlan[host_id] = [seg_id] LOG.info(_("Host vlan: %s"), str(host_vlan)) return host_vlan def get_vlanlist_byhost(host_id): host_vlan = get_host_vlan() vlanlist = host_vlan.get(host_id, None) return vlanlist or []
import FWCore.ParameterSet.Config as cms

# CMSSW configuration for the EWK dimuon skim: after an HLT single-muon
# filter, keeps events with at least one muon pair or one muon+track pair.
process = cms.Process("EWKDimuonSkim")

process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")

process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(True)
)

# source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        'file:/scratch1/cms/data/summer09/aodsim/zmumu/0016/A68B6BD1-FF83-DE11-B579-001E68A99420.root'
#        'file:/scratch1/cms/data/summer09/aodsim/ppMuX/0010/9C519151-5883-DE11-8BC8-001AA0095119.root'
    )
)

process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(500)
)

process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = cms.string('MC_31X_V3::All')
process.load("Configuration.StandardSequences.MagneticField_cff")

# HLT filter
import HLTrigger.HLTfilters.hltHighLevel_cfi
process.EWK_MuHLTFilter = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone()
# Uncomment this to access 8E29 menu and filter on it
#process.EWK_MuHLTFilter.TriggerResultsTag = cms.InputTag("TriggerResults","","HLT8E29")
#process.EWK_MuHLTFilter.HLTPaths = ["HLT_Mu3", "HLT_DoubleMu3"]
# Uncomment this to filter on 1E31 HLT menu
process.EWK_MuHLTFilter.HLTPaths = ["HLT_Mu9"]

# Muon candidates filters
process.goodAODMuons = cms.EDFilter("CandViewSelector",
    src = cms.InputTag("muons"),
    cut = cms.string('pt > 0'),
    filter = cms.bool(True)
)

process.goodAODGlobalMuons = cms.EDFilter("CandViewSelector",
    src = cms.InputTag("goodAODMuons"),
    cut = cms.string('isGlobalMuon=1'),
    filter = cms.bool(True)
)

# Track candidates
process.trackCandsUnfiltered = cms.EDProducer("ConcreteChargedCandidateProducer",
    src = cms.InputTag("generalTracks"),
    particleType = cms.string('mu+')  # to fix mass hypothesis
)

# Track candidates filter
process.goodAODTrackCands = cms.EDFilter("CandViewSelector",
    src = cms.InputTag("trackCandsUnfiltered"),
    cut = cms.string('pt > 10')
)

# Dimuon candidates
process.dimuonsAOD = cms.EDFilter("CandViewShallowCloneCombiner",
    checkCharge = cms.bool(False),
    cut = cms.string('mass > 0'),
    decay = cms.string("goodAODMuons@+ goodAODMuons@-")
)

process.dimuonsGlobalAOD = cms.EDFilter("CandViewRefSelector",
    src = cms.InputTag("dimuonsAOD"),
    cut = cms.string('charge = 0 & daughter(0).isGlobalMuon = 1 & daughter(1).isGlobalMuon = 1')
)

# One global muon + one standalone-only muon, opposite charge, mass > 20
process.dimuonsOneStandAloneMuonAOD = cms.EDFilter("CandViewRefSelector",
    src = cms.InputTag("dimuonsAOD"),
    cut = cms.string('charge = 0 & mass > 20 & ( (daughter(0).isStandAloneMuon = 1 & daughter(0).isGlobalMuon = 0 & daughter(1).isGlobalMuon = 1) | (daughter(1).isStandAloneMuon = 1 & daughter(1).isGlobalMuon = 0 & daughter(0).isGlobalMuon = 1) )')
)

process.dimuonsOneTrackAOD = cms.EDFilter("CandViewShallowCloneCombiner",
    checkCharge = cms.bool(False),
    cut = cms.string('mass > 20'),
    decay = cms.string('goodAODMuons@+ goodAODTrackCands@-')
)

process.dimuonsOneGlobalMuonOneTrackAOD = cms.EDFilter("CandViewRefSelector",
    src = cms.InputTag("dimuonsOneTrackAOD"),
    cut = cms.string('charge = 0 & daughter(0).isGlobalMuon = 1')
)

# dimuon filters
process.dimuonsFilter = cms.EDFilter("CandViewCountFilter",
    src = cms.InputTag("dimuonsAOD"),
    minNumber = cms.uint32(1)
)

process.dimuonsOneTrackFilter = cms.EDFilter("CandViewCountFilter",
    src = cms.InputTag("dimuonsOneTrackAOD"),
    minNumber = cms.uint32(1)
)

# Skim paths
process.EWK_dimuonsPath = cms.Path(
    process.EWK_MuHLTFilter *
    process.goodAODMuons *
    process.goodAODGlobalMuons *
    process.dimuonsAOD *
    process.dimuonsGlobalAOD *
    process.dimuonsOneStandAloneMuonAOD *
    process.dimuonsFilter
)

process.EWK_dimuonsOneTrackPath = cms.Path(
    process.EWK_MuHLTFilter *
    process.goodAODMuons *
    process.goodAODGlobalMuons *
    process.trackCandsUnfiltered *
    process.goodAODTrackCands *
    process.dimuonsOneTrackAOD *
    process.dimuonsOneGlobalMuonOneTrackAOD *
    process.dimuonsOneTrackFilter
)

# Output module configuration
from Configuration.EventContent.EventContent_cff import *

# Skim-specific products kept on top of the standard AOD event content.
EWK_dimuonsEventContent = cms.PSet(
    outputCommands = cms.untracked.vstring(
        'keep *_trackCandsUnfiltered_*_*',
        'keep *_goodAODTrackCands_*_*',
        'keep *_goodAODMuons_*_*',
        'keep *_dimuonsAOD_*_*',
        'keep *_dimuonsGlobalAOD_*_*',
        'keep *_dimuonsOneStandAloneMuonAOD_*_*',
        'keep *_dimuonsOneTrackAOD_*_*',
        'keep *_dimuonsOneGlobalMuonOneTrackAOD_*_*',
    )
)

EWK_DimuonSkimEventContent = cms.PSet(
    outputCommands = cms.untracked.vstring()
)
EWK_DimuonSkimEventContent.outputCommands.extend(AODEventContent.outputCommands)
EWK_DimuonSkimEventContent.outputCommands.extend(EWK_dimuonsEventContent.outputCommands)

# Write out events accepted by either skim path.
EWK_DimuonSkimEventSelection = cms.PSet(
    SelectEvents = cms.untracked.PSet(
        SelectEvents = cms.vstring(
            'EWK_dimuonsPath',
            'EWK_dimuonsOneTrackPath')
    )
)

process.EWK_DimuonSkimOutputModule = cms.OutputModule("PoolOutputModule",
    EWK_DimuonSkimEventContent,
    EWK_DimuonSkimEventSelection,
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string('EWKDimuonSkim'),
        dataTier = cms.untracked.string('USER')
    ),
    fileName = cms.untracked.string('file:testEWKDimuonSkim.root')
)

process.outpath = cms.EndPath(process.EWK_DimuonSkimOutputModule)
import json import requests import os import os.path import sys iqurl = sys.argv[1] iquser = sys.argv[2] iqpwd = sys.argv[3] jsonfile = 'applicationevaluations.json' csvfile = 'applicationevaluations.csv' def get_metrics(): req = requests.get('{}/api/v2/reports/applications'.format(iqurl), auth=(iquser, iqpwd), verify=False) if req.status_code == 200: res = req.json() else: res = "Error fetching data" return res def getApplicationName(urlPath): l = urlPath.split('/') return(l[3]) def writeToCsvFile(applicationEvaluations): with open(csvfile, 'w') as fd: fd.write("ApplicationName,EvaluationDate,Stage\n") for applicationEvaluation in applicationEvaluations: stage = applicationEvaluation["stage"] evaluationDate = applicationEvaluation["evaluationDate"] applicationId = applicationEvaluation["applicationId"] applicationName = getApplicationName(applicationEvaluation["reportDataUrl"]) line = applicationName + "," + evaluationDate + "," + stage + "\n" fd.write(line) return def main(): applicationEvaluations = get_metrics() with open(jsonfile, 'w') as fd: json.dump(applicationEvaluations, fd) print(jsonfile) writeToCsvFile(applicationEvaluations) print(csvfile) if __name__ == '__main__': main()
# -*- coding: utf-8 -*- import unittest from app.missions.mission import is_mission_feasible, get_expiry_date from app.planes.supersonic_tu_plane import SupersonicTUPlane class TestMission(object): def __init__(self, **kwargs): self.km_nb = kwargs['km_nb'] self.travellers_nb = kwargs['travellers_nb'] self.pilots_nb = kwargs['pilots_nb'] self.flight_attendants_nb = kwargs['flight_attendants_nb'] self.stopover = None class TestParser(unittest.TestCase): def test_feasible(self): mission = TestMission(km_nb=4000, travellers_nb=0, pilots_nb=0, flight_attendants_nb=0) plane = SupersonicTUPlane(plane_id=0, ready=True, in_mission=False) self.assertTrue(is_mission_feasible(mission, plane)) def test_not_feasible(self): mission = TestMission(km_nb=7000, travellers_nb=0, pilots_nb=0, flight_attendants_nb=0) plane = SupersonicTUPlane(plane_id=0, ready=True, in_mission=False) self.assertFalse(is_mission_feasible(mission, plane)) @unittest.skip("Not implemented yet") def test_get_expiry_date(self): self.assertEqual(0, get_expiry_date()) if __name__ == '__main__': unittest.main()
""" =================================================================================== | N A M E G O E S H E R E L O L (v-2.4.0) | | a game made by SSS_Says_Snek#0194, aimed at upgrading snake | | it's actually gonna be a terrible game, but hey, why not? | | | | ++-=========================================================================-++ | | I N F O R M A T I O N | | | | Date created: 4/4/2021, at 12:05 AM | | | =================================================================================== """ from src.Engine.game import GameLoop if __name__ == "__main__": # Main code to run game game_loop = GameLoop() game_loop.run()
# coding: utf-8 """ Copyright 2016 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ref: https://github.com/swagger-api/swagger-codegen """ from pprint import pformat from six import iteritems import re import json from ..utils import sanitize_for_serialization class DataTableImportJob(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self): """ DataTableImportJob - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. 
""" self.swagger_types = { 'id': 'str', 'name': 'str', 'owner': 'AddressableEntityRef', 'status': 'str', 'date_created': 'datetime', 'date_completed': 'datetime', 'upload_uri': 'str', 'import_mode': 'str', 'error_information': 'ErrorBody', 'count_records_updated': 'int', 'count_records_deleted': 'int', 'count_records_failed': 'int', 'self_uri': 'str' } self.attribute_map = { 'id': 'id', 'name': 'name', 'owner': 'owner', 'status': 'status', 'date_created': 'dateCreated', 'date_completed': 'dateCompleted', 'upload_uri': 'uploadURI', 'import_mode': 'importMode', 'error_information': 'errorInformation', 'count_records_updated': 'countRecordsUpdated', 'count_records_deleted': 'countRecordsDeleted', 'count_records_failed': 'countRecordsFailed', 'self_uri': 'selfUri' } self._id = None self._name = None self._owner = None self._status = None self._date_created = None self._date_completed = None self._upload_uri = None self._import_mode = None self._error_information = None self._count_records_updated = None self._count_records_deleted = None self._count_records_failed = None self._self_uri = None @property def id(self): """ Gets the id of this DataTableImportJob. The globally unique identifier for the object. :return: The id of this DataTableImportJob. :rtype: str """ return self._id @id.setter def id(self, id): """ Sets the id of this DataTableImportJob. The globally unique identifier for the object. :param id: The id of this DataTableImportJob. :type: str """ self._id = id @property def name(self): """ Gets the name of this DataTableImportJob. :return: The name of this DataTableImportJob. :rtype: str """ return self._name @name.setter def name(self, name): """ Sets the name of this DataTableImportJob. :param name: The name of this DataTableImportJob. :type: str """ self._name = name @property def owner(self): """ Gets the owner of this DataTableImportJob. The PureCloud user who started the import job :return: The owner of this DataTableImportJob. 
:rtype: AddressableEntityRef """ return self._owner @owner.setter def owner(self, owner): """ Sets the owner of this DataTableImportJob. The PureCloud user who started the import job :param owner: The owner of this DataTableImportJob. :type: AddressableEntityRef """ self._owner = owner @property def status(self): """ Gets the status of this DataTableImportJob. The status of the import job :return: The status of this DataTableImportJob. :rtype: str """ return self._status @status.setter def status(self, status): """ Sets the status of this DataTableImportJob. The status of the import job :param status: The status of this DataTableImportJob. :type: str """ allowed_values = ["WaitingForUpload", "Processing", "Failed", "Succeeded"] if status.lower() not in map(str.lower, allowed_values): # print("Invalid value for status -> " + status) self._status = "outdated_sdk_version" else: self._status = status @property def date_created(self): """ Gets the date_created of this DataTableImportJob. The timestamp of when the import began. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z :return: The date_created of this DataTableImportJob. :rtype: datetime """ return self._date_created @date_created.setter def date_created(self, date_created): """ Sets the date_created of this DataTableImportJob. The timestamp of when the import began. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z :param date_created: The date_created of this DataTableImportJob. :type: datetime """ self._date_created = date_created @property def date_completed(self): """ Gets the date_completed of this DataTableImportJob. The timestamp of when the import stopped (either successfully or unsuccessfully). Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z :return: The date_completed of this DataTableImportJob. 
:rtype: datetime """ return self._date_completed @date_completed.setter def date_completed(self, date_completed): """ Sets the date_completed of this DataTableImportJob. The timestamp of when the import stopped (either successfully or unsuccessfully). Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z :param date_completed: The date_completed of this DataTableImportJob. :type: datetime """ self._date_completed = date_completed @property def upload_uri(self): """ Gets the upload_uri of this DataTableImportJob. The URL of the location at which the caller can upload the file to be imported :return: The upload_uri of this DataTableImportJob. :rtype: str """ return self._upload_uri @upload_uri.setter def upload_uri(self, upload_uri): """ Sets the upload_uri of this DataTableImportJob. The URL of the location at which the caller can upload the file to be imported :param upload_uri: The upload_uri of this DataTableImportJob. :type: str """ self._upload_uri = upload_uri @property def import_mode(self): """ Gets the import_mode of this DataTableImportJob. The indication of whether the processing should remove rows that don't appear in the import file :return: The import_mode of this DataTableImportJob. :rtype: str """ return self._import_mode @import_mode.setter def import_mode(self, import_mode): """ Sets the import_mode of this DataTableImportJob. The indication of whether the processing should remove rows that don't appear in the import file :param import_mode: The import_mode of this DataTableImportJob. :type: str """ allowed_values = ["ReplaceAll", "Append"] if import_mode.lower() not in map(str.lower, allowed_values): # print("Invalid value for import_mode -> " + import_mode) self._import_mode = "outdated_sdk_version" else: self._import_mode = import_mode @property def error_information(self): """ Gets the error_information of this DataTableImportJob. 
Any error information, or null of the processing is not in an error state :return: The error_information of this DataTableImportJob. :rtype: ErrorBody """ return self._error_information @error_information.setter def error_information(self, error_information): """ Sets the error_information of this DataTableImportJob. Any error information, or null of the processing is not in an error state :param error_information: The error_information of this DataTableImportJob. :type: ErrorBody """ self._error_information = error_information @property def count_records_updated(self): """ Gets the count_records_updated of this DataTableImportJob. The current count of the number of records processed :return: The count_records_updated of this DataTableImportJob. :rtype: int """ return self._count_records_updated @count_records_updated.setter def count_records_updated(self, count_records_updated): """ Sets the count_records_updated of this DataTableImportJob. The current count of the number of records processed :param count_records_updated: The count_records_updated of this DataTableImportJob. :type: int """ self._count_records_updated = count_records_updated @property def count_records_deleted(self): """ Gets the count_records_deleted of this DataTableImportJob. The current count of the number of records deleted :return: The count_records_deleted of this DataTableImportJob. :rtype: int """ return self._count_records_deleted @count_records_deleted.setter def count_records_deleted(self, count_records_deleted): """ Sets the count_records_deleted of this DataTableImportJob. The current count of the number of records deleted :param count_records_deleted: The count_records_deleted of this DataTableImportJob. :type: int """ self._count_records_deleted = count_records_deleted @property def count_records_failed(self): """ Gets the count_records_failed of this DataTableImportJob. 
The current count of the number of records that failed to import :return: The count_records_failed of this DataTableImportJob. :rtype: int """ return self._count_records_failed @count_records_failed.setter def count_records_failed(self, count_records_failed): """ Sets the count_records_failed of this DataTableImportJob. The current count of the number of records that failed to import :param count_records_failed: The count_records_failed of this DataTableImportJob. :type: int """ self._count_records_failed = count_records_failed @property def self_uri(self): """ Gets the self_uri of this DataTableImportJob. The URI for this object :return: The self_uri of this DataTableImportJob. :rtype: str """ return self._self_uri @self_uri.setter def self_uri(self, self_uri): """ Sets the self_uri of this DataTableImportJob. The URI for this object :param self_uri: The self_uri of this DataTableImportJob. :type: str """ self._self_uri = self_uri def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_json(self): """ Returns the model as raw JSON """ return json.dumps(sanitize_for_serialization(self.to_dict())) def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
import os from tornado.web import url from app.handlers import index handlers = [ ('/', index.IndexHandler), # url('/html', HtmlHandler), # url('/data/?', DBHandler), # url(r'/(?P<name>\w+)/?', MainNameHandler), # (r"/(.*?)/(.*?)/(.*)", RedirectHandler, {"url": "/{1}/{0}/{2}", 'permanent': True}), # (r"/api/register", Passport.RegisterHandler), # (r"/api/login", Passport.LoginHandler), # (r"/api/logout", Passport.LogoutHandler), ]
#!/usr/bin/env python # Standard library imports import os.path import sys # Third party imports import numpy as np import pytest # Local imports from shakelib.conversions.imt.newmark_hall_1982 import NewmarkHall1982 homedir = os.path.dirname(os.path.abspath(__file__)) # where is this script? shakedir = os.path.abspath(os.path.join(homedir, '..', '..', '..', '..')) sys.path.insert(0, shakedir) def test_newmarkhall1982(): # Inputs PSA10in = np.log(0.1) sd = 0.6 nh82 = NewmarkHall1982() # Test that the correct inputs are returned for valid outputs input1 = nh82.getInputIMT('pgv ') input2 = nh82.getInputIMT('PGV') assert len(input1) == len(input2) == 1 assert input1[0] == input2[0] == 'PSA10' # Test that invalid outputs return None input3 = nh82.getInputIMT('INVALID') assert input3 == None # Test valid conversions PGVout = nh82.convertAmps('psa10', 'PGV', PSA10in) PGVsdout = nh82.convertSigmas('psa10', 'PGV', sd) mfact = nh82.getConversionFactor() lnsig = nh82.getLnSigma() assert abs(PGVout - np.log(9.46658)) < 0.001 assert abs(PGVsdout - 0.790489) < 0.001 assert abs(mfact - 94.6658) < 0.001 assert abs(lnsig - 0.5146578) < 0.001 # Test invalid conversions with pytest.raises(Exception) as a: tmp = nh82.convertAmps('INVALID', 'PSA05', PGVin) with pytest.raises(Exception) as a: tmp = nh82.convertAmps('PGV', 'INVALID', PGVin) with pytest.raises(Exception) as a: tmp = nh82.convertAmps('INVALID', 'INVALID', PGVin) if __name__ == '__main__': test_newmarkhall1982()
# Aula 8 - Utilizando Módulos from math import sqrt, floor import emoji num = int(input('Digite um numero: ')) raiz = sqrt(num) # importando apenas um modulo da biblioteca, nao eh necessario declarar a biblioteca (math.sqrt) print(f'A raiz quadrada de {num} eh {floor(raiz):.2f}') # floor() arredonda pra cima print(emoji.emojize('Teste da biblioteca emoji :sunglasses:', use_aliases=True))
# Copyright (c) 2019 Eric Steinberger

import numpy as np
import torch

from prl.environment.steinberger.PokerRL.rl import rl_util
from prl.environment.steinberger.PokerRL.rl.neural.DuelingQNet import DuelingQNet
from prl.environment.steinberger.PokerRL.rl.neural.NetWrapperBase import NetWrapperArgsBase as _NetWrapperArgsBase
from prl.environment.steinberger.PokerRL.rl.neural.NetWrapperBase import NetWrapperBase as _NetWrapperBase


class DDQN(_NetWrapperBase):
    """Double-DQN wrapper around a dueling Q-network.

    Keeps the online network (``self._net``, owned by the base wrapper) plus
    a frozen target network; the online net picks the argmax action and the
    target net evaluates it (the double-Q decoupling in _mini_batch_loop).
    """

    def __init__(self,
                 env_bldr,
                 ddqn_args,
                 owner,
                 ):
        super().__init__(
            net=DuelingQNet(env_bldr=env_bldr, q_args=ddqn_args.q_args, device=ddqn_args.device_training),
            env_bldr=env_bldr,
            args=ddqn_args,
            owner=owner,
            device=ddqn_args.device_training,
        )

        # Exploration rate; must be set via the `eps` property before
        # calling select_br_a(explore=True).
        self._eps = None

        # Frozen copy of the online net, refreshed by update_target_net().
        self._target_net = DuelingQNet(env_bldr=env_bldr, q_args=ddqn_args.q_args, device=ddqn_args.device_training)
        self._target_net.eval()
        self.update_target_net()

        # [0, 1, ..., batch_size-1]; row indexer for per-sample action picks.
        self._batch_arranged = torch.arange(ddqn_args.batch_size, dtype=torch.long, device=self.device)
        # Very large negative constant used to mask illegal actions before max().
        self._minus_e20 = torch.full((ddqn_args.batch_size, self._env_bldr.N_ACTIONS,),
                                     fill_value=-10e20,
                                     device=self.device,
                                     dtype=torch.float32,
                                     requires_grad=False)
        # Plain list [0..N_ACTIONS-1] used to enumerate illegal actions.
        self._n_actions_arranged = np.arange(self._env_bldr.N_ACTIONS, dtype=np.int32).tolist()

    @property
    def eps(self):
        # Current epsilon for epsilon-greedy exploration.
        return self._eps

    @eps.setter
    def eps(self, value):
        self._eps = value

    def select_br_a(self, pub_obses, range_idxs, legal_actions_lists, explore=False):
        """Pick one best-response action per batch entry.

        With ``explore=True``, an epsilon-greedy coin flip can return a
        uniformly random *legal* action for the whole batch; otherwise the
        greedy argmax over Q-values (illegal actions masked out) is taken.
        Returns a numpy array of action indices.
        """
        if explore and (np.random.random() < self._eps):
            return np.array(
                [legal_actions[np.random.randint(len(legal_actions))] for legal_actions in legal_actions_lists]
            )

        with torch.no_grad():
            self.eval()
            range_idxs = torch.tensor(range_idxs, dtype=torch.long, device=self.device)
            q = self._net(pub_obses=pub_obses, range_idxs=range_idxs,
                          legal_action_masks=rl_util.batch_get_legal_action_mask_torch(
                              n_actions=self._env_bldr.N_ACTIONS,
                              legal_actions_lists=legal_actions_lists,
                              device=self.device,
                              dtype=torch.float32)).cpu().numpy()

            # Mask illegal actions so argmax can only select legal ones.
            # NOTE(review): uses -1e20 here vs. the -10e20 fill of
            # self._minus_e20 used in training — both act as -inf, but
            # confirm the inconsistency is intentional.
            for b in range(q.shape[0]):
                illegal_actions = [i for i in self._n_actions_arranged if i not in legal_actions_lists[b]]
                if len(illegal_actions) > 0:
                    illegal_actions = np.array(illegal_actions)
                    q[b, illegal_actions] = -1e20

            return np.argmax(q, axis=1)

    def update_target_net(self):
        """Hard-copy the online net's weights into the frozen target net."""
        self._target_net.load_state_dict(self._net.state_dict())
        self._target_net.eval()

    def _mini_batch_loop(self, buffer, grad_mngr):
        """Sample one batch and backprop the double-Q TD error.

        The online net chooses the best next action; the target net supplies
        its value.  Terminal transitions zero out the bootstrap term.
        """
        batch_pub_obs_t, \
            batch_a_t, \
            batch_range_idx, \
            batch_legal_action_mask_t, \
            batch_r_t, \
            batch_pub_obs_tp1, \
            batch_legal_action_mask_tp1, \
            batch_done = \
            buffer.sample(device=self.device, batch_size=self._args.batch_size)

        # [batch_size, n_actions]
        q1_t = self._net(pub_obses=batch_pub_obs_t,
                         range_idxs=batch_range_idx,
                         legal_action_masks=batch_legal_action_mask_t.to(torch.float32))
        q1_tp1 = self._net(pub_obses=batch_pub_obs_tp1,
                           range_idxs=batch_range_idx,
                           legal_action_masks=batch_legal_action_mask_tp1.to(torch.float32)).detach()
        q2_tp1 = self._target_net(pub_obses=batch_pub_obs_tp1,
                                  range_idxs=batch_range_idx,
                                  legal_action_masks=batch_legal_action_mask_tp1.to(torch.float32)).detach()

        # ______________________________________________ TD Learning _______________________________________________
        # [batch_size]
        q1_t_of_a_selected = q1_t[self._batch_arranged, batch_a_t]

        # only consider allowed actions for tp1
        q1_tp1 = torch.where(batch_legal_action_mask_tp1,
                             q1_tp1,
                             self._minus_e20)

        # [batch_size]
        _, best_a_tp1 = q1_tp1.max(dim=-1, keepdim=False)
        q2_best_a_tp1 = q2_tp1[self._batch_arranged, best_a_tp1]
        # Zero the bootstrap value on terminal transitions.
        q2_best_a_tp1 = q2_best_a_tp1 * (1.0 - batch_done)

        # NOTE(review): target is r + Q_target(s', argmax) with no explicit
        # discount factor visible here — confirm gamma=1 is intended.
        target = batch_r_t + q2_best_a_tp1

        grad_mngr.backprop(pred=q1_t_of_a_selected, target=target)

    def state_dict(self):
        """Serialize online net, target net, epsilon, owner and args."""
        return {
            "q_net": self._net.state_dict(),
            "target_net": self._target_net.state_dict(),
            "eps": self._eps,
            "owner": self.owner,
            "args": self._args,
        }

    def load_state_dict(self, state):
        """Restore nets and epsilon; the owner must match and args are
        deliberately not loaded."""
        assert self.owner == state["owner"]

        # Not loading args by design
        self._net.load_state_dict(state["q_net"])
        self._target_net.load_state_dict(state["target_net"])
        self._eps = state["eps"]

    @staticmethod
    def from_state_dict(state_dict, env_bldr):
        """Rebuild a trainable DDQN from a serialized state dict."""
        ddqn = DDQN(owner=state_dict["owner"],
                    ddqn_args=state_dict["args"],
                    env_bldr=env_bldr)
        ddqn.load_state_dict(state_dict)
        ddqn.update_target_net()
        return ddqn

    @staticmethod
    def inference_version_from_state_dict(state_dict, env_bldr):
        """Like from_state_dict, but strips the replay buffer and epsilon
        for inference-only use."""
        ddqn = DDQN.from_state_dict(state_dict=state_dict, env_bldr=env_bldr)
        ddqn.buf = None
        ddqn.eps = None
        return ddqn


class DDQNArgs(_NetWrapperArgsBase):
    """Hyperparameters for DDQN training (replay buffer size, target-net
    sync frequency, epsilon schedule, optimizer settings)."""

    def __init__(self,
                 q_args,
                 cir_buf_size=1e5,
                 batch_size=512,
                 n_mini_batches_per_update=1,
                 target_net_update_freq=300,
                 optim_str="adam",
                 loss_str="mse",
                 lr=0.005,
                 eps_start=0.065,
                 eps_const=0.007,
                 eps_exponent=0.475,
                 eps_min=0.02,
                 grad_norm_clipping=10.0,
                 device_training="cpu",
                 ):
        assert isinstance(device_training, str), "Please pass a string (either 'cpu' or 'cuda')!"
        super().__init__(batch_size=batch_size,
                         n_mini_batches_per_update=n_mini_batches_per_update,
                         optim_str=optim_str,
                         loss_str=loss_str,
                         lr=lr,
                         grad_norm_clipping=grad_norm_clipping,
                         device_training=device_training)
        self.q_args = q_args
        self.cir_buf_size = int(cir_buf_size)
        self.target_net_update_freq = int(target_net_update_freq)
        # Epsilon decay schedule parameters (start / scale / exponent / floor).
        self.eps_start = eps_start
        self.eps_const = eps_const
        self.eps_exponent = eps_exponent
        self.eps_min = eps_min
"""Stocastic graph.""" # Copyright (C) 2010-2013 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. import networkx as nx from networkx.utils import not_implemented_for __author__ = "Aric Hagberg <aric.hagberg@gmail.com>" __all__ = ['stochastic_graph'] @not_implemented_for('multigraph') @not_implemented_for('undirected') def stochastic_graph(G, copy=True, weight='weight'): """Return a right-stochastic representation of G. A right-stochastic graph is a weighted digraph in which all of the node (out) neighbors edge weights sum to 1. Parameters ----------- G : directed graph A NetworkX DiGraph copy : boolean, optional If True make a copy of the graph, otherwise modify the original graph weight : edge attribute key (optional, default='weight') Edge data key used for weight. If no attribute is found for an edge the edge weight is set to 1. Weights must be positive numbers. """ import warnings if copy: W = nx.DiGraph(G) else: W = G # reference original graph, no copy degree = W.out_degree(weight=weight) for (u,v,d) in W.edges(data=True): if degree[u] == 0: warnings.warn('zero out-degree for node %s'%u) d[weight] = 0.0 else: d[weight] = float(d.get(weight,1.0))/degree[u] return W
#!/usr/bin/python import unittest import autodetect as detect class TestDetectDevice(unittest.TestCase): options = {} def setUp(self): self.options = {} self.options["--ssh-path"] = "/usr/bin/ssh" self.options["--telnet-path"] = "/usr/bin/telnet" self.options["--login-timeout"] = "10" self.options["--shell-timeout"] = "5" self.options["--power-timeout"] = "10" self.options["eol"] = "\r\n" def test_bladecenter(self): self.options["--username"] = "rhts" self.options["--password"] = "100yard-" self.options["--ip"] = "blade-mm.englab.brq.redhat.com" (found_cmd_prompt, conn) = detect.detect_login_telnet(self.options) res = detect.detect_device(conn, self.options, found_cmd_prompt) self.assertEqual('fence_bladecenter', res) def test_apc5(self): self.assertEqual('foo', 'foo') self.options["c"] = "c" print self.options if __name__ == "__main__": unittest.main()
# -*- coding: utf-8 -*-
"""SPUB.ipynb — Sentence Prediction Using BERT.

Automatically generated by Colaboratory, then cleaned up so the exported
script is valid Python.

Original file is located at
    https://colab.research.google.com/drive/1cqgT4kKo8l52Rs_hozQQBVFZK8atvd8g
"""

# BUG FIX: the bare notebook cell `pip install transformers` is a
# SyntaxError in an exported .py script.  Install from a shell instead:
#   pip install transformers

from transformers import BertTokenizer, BertForNextSentencePrediction
import torch

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')

text = ("After Abraham Lincoln won the November 1860 presidential election on an "
        "anti-slavery platform, an initial seven slave states declared their "
        "secession from the country to form the Confederacy.")
text2 = ("War broke out in April 1861 when secessionist forces attacked Fort "
         "Sumter in South Carolina, just over a month after Lincoln's "
         "inauguration.")
# text2 is a continuation of text (correlating sentences)

# BUG FIX: the last fragment of text1 was not quoted correctly (the text
# after 'two states' fell outside any string literal -> SyntaxError).
text1 = ("Rome is the capital city and a special comune of Italy (named Comune di Roma Capitale), as well as the capital of the Lazio region "
         "The city has been a major human settlement for almost three millennia. With 2,860,009 residents in 1,285 km2 (496.1 sq mi) it is also the country's most populated comune "
         "It is the third most populous city in the European Union by population within city limits "
         "It is the centre of the Metropolitan City of Rome, which has a population of 4,355,725 residents, thus making it the most populous metropolitan city in Italy "
         "Its metropolitan area is the third-most populous within Italy.[3] Rome is located in the central-western portion of the Italian Peninsula, within Lazio (Latium), along the shores of the Tiber "
         "Vatican City (the smallest country in the world)[4] is an independent country inside the city boundaries of Rome, the only existing example of a country within a city for this reason Rome has sometimes been defined as the capital of two states "
         "Rome's Neighbouring country/city Carthage was one of its worst rival as due to the competetion of trade and goods and services ")

text3 = ("Carthage was the capital city of the ancient Carthaginian civilization, on the eastern side of the Lake of Tunis in what is now Tunisia "
         "Carthage was the most important trading hub of the Ancient Mediterranean and one of the most affluent cities of the classical world "
         "The city developed from a Phoenician colony into the capital of a Punic empire which dominated large parts of the Southwest Mediterranean during the first millennium BC "
         "The legendary Queen Alyssa or Dido is regarded as the founder of the city, though her historicity has been questioned "
         "According to accounts by Timaeus of Tauromenium, she purchased from a local tribe the amount of land that could be covered by an oxhide ")
# text3 is a continuation of text1 (correlating sentences (twisted))

# ---- Tokenization -------------------------------------------------------
# Each sentence pair becomes one encoded input for next-sentence prediction.
inputs = tokenizer(text, text2, return_tensors='pt')
inputs1 = tokenizer(text1, text3, return_tensors='pt')

# ---- Class label creation -----------------------------------------------
# Label 0 == "second sentence IS the continuation of the first".
labels = torch.LongTensor([0])

# ---- Loss calculation ---------------------------------------------------
outputs = model(**inputs, labels=labels)
outputs1 = model(**inputs1, labels=labels)
print(outputs.loss.item())
print(outputs1.loss.item())

# ---- Prediction ---------------------------------------------------------
outputs = model(**inputs)
# BUG FIX: the original recomputed `outputs1` from `inputs` (pair 1),
# so the second pair was never actually evaluated; use `inputs1`.
outputs1 = model(**inputs1)
# argmax 0 => the model predicts the second sentence follows the first,
# for both (text, text2) and (text1, text3).
print(torch.argmax(outputs.logits))
print(torch.argmax(outputs1.logits))
from django.db import models
from django.contrib.auth.models import User
from django.db.models.fields import related  # NOTE(review): unused import, kept for compatibility


class Product(models.Model):
    """A sellable item, created/owned by a user."""
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    name = models.CharField(max_length=200, null=True, blank=True)
    image = models.ImageField(null=True, blank=True, default='/placeholder.png')
    category = models.CharField(max_length=200, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    # Aggregate rating across reviews (decimal); individual Review.rating is an integer.
    rating = models.DecimalField(
        max_digits=7, decimal_places=2, null=True, blank=True)
    numReviews = models.IntegerField(null=True, blank=True, default=0)
    price = models.DecimalField(
        max_digits=7, decimal_places=2, null=True, blank=True)
    countInStock = models.IntegerField(null=True, blank=True, default=0)
    _id = models.AutoField(primary_key=True, editable=False)

    def __str__(self):
        return self.name


class Review(models.Model):
    """A single user's rating/comment on a product."""
    product = models.ForeignKey(Product, on_delete=models.SET_NULL, null=True)
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    name = models.CharField(max_length=200, null=True, blank=True)
    rating = models.IntegerField(null=True, blank=True, default=0)
    comment = models.TextField(null=True, blank=True)
    createdAt = models.DateTimeField(auto_now_add=True)
    _id = models.AutoField(primary_key=True, editable=False)

    def __str__(self):
        return str(self.rating)


class Order(models.Model):
    """A customer order: payment, pricing, and fulfilment status."""
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    paymentMethod = models.CharField(max_length=200, null=True, blank=True)
    taxPrice = models.DecimalField(
        max_digits=7, decimal_places=2, null=True, blank=True)
    shippingPrice = models.DecimalField(
        max_digits=7, decimal_places=2, null=True, blank=True)
    totalPrice = models.DecimalField(
        max_digits=7, decimal_places=2, null=True, blank=True)
    isPaid = models.BooleanField(default=False)
    paidAt = models.DateTimeField(auto_now_add=False, null=True, blank=True)
    isDelivered = models.BooleanField(default=False)
    deliveredAt = models.DateTimeField(
        auto_now_add=False, null=True, blank=True)
    createdAt = models.DateTimeField(auto_now_add=True)
    _id = models.AutoField(primary_key=True, editable=False)

    def __str__(self):
        return str(self.createdAt)


class OrderItem(models.Model):
    """One product line within an order (snapshot of name/price/image)."""
    product = models.ForeignKey(Product, on_delete=models.SET_NULL, null=True)
    order = models.ForeignKey(Order, on_delete=models.SET_NULL, null=True)
    name = models.CharField(max_length=200, null=True, blank=True)
    qty = models.IntegerField(null=True, blank=True, default=0)
    price = models.DecimalField(
        max_digits=7, decimal_places=2, null=True, blank=True)
    image = models.CharField(max_length=200, null=True, blank=True)
    _id = models.AutoField(primary_key=True, editable=False)

    def __str__(self):
        return str(self.name)


class ShippingAddress(models.Model):
    """Destination address, one-to-one with its order."""
    order = models.OneToOneField(
        Order, on_delete=models.CASCADE, null=True, blank=True)
    address = models.CharField(max_length=200, null=True, blank=True)
    city = models.CharField(max_length=200, null=True, blank=True)
    postalCode = models.CharField(max_length=200, null=True, blank=True)
    country = models.CharField(max_length=200, null=True, blank=True)
    shippingPrice = models.DecimalField(
        max_digits=7, decimal_places=2, null=True, blank=True)
    _id = models.AutoField(primary_key=True, editable=False)

    def __str__(self):
        return str(self.address)


class Faction(models.Model):
    """A playable faction with aggregate play statistics."""
    name = models.CharField(max_length=200, null=True, blank=True)
    numPlayed = models.IntegerField(null=True, blank=True, default=0)
    winRate = models.IntegerField(null=True, blank=True, default=0)
    _id = models.AutoField(primary_key=True, editable=False)

    def __str__(self):
        return str(self.name)


class Game(models.Model):
    """A two-player match, recording factions, scores, and outcome."""
    player1 = models.ForeignKey(
        User, on_delete=models.SET_NULL, null=True, related_name="player1")
    player2 = models.ForeignKey(
        User, on_delete=models.SET_NULL, null=True, related_name="player2")
    p1Faction = models.ForeignKey(
        Faction, on_delete=models.SET_NULL, null=True, related_name="p1Faction")
    p2Faction = models.ForeignKey(
        Faction, on_delete=models.SET_NULL, null=True, related_name="p2Faction")
    p1Score = models.IntegerField(null=True, blank=True, default=0)
    p2Score = models.IntegerField(null=True, blank=True, default=0)
    # Bug fix: related_name values were swapped — ``loser`` had
    # related_name="winner" and vice versa, so user.winner.all() returned
    # the games the user LOST. Each accessor now matches its field.
    loser = models.ForeignKey(
        User, on_delete=models.SET_NULL, null=True, related_name="loser")
    winner = models.ForeignKey(
        User, on_delete=models.SET_NULL, null=True, related_name="winner")
    _id = models.AutoField(primary_key=True, editable=False)

    def __str__(self):
        return str(self._id)
# Generated by Django 2.1 on 2018-09-04 14:02 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('phantomapp', '0007_shopcategory_shopproduct'), ] operations = [ migrations.AlterField( model_name='shopproduct', name='description', field=models.TextField(), ), ]
from django.urls import path

from .views import ApplicationListAPIView, update_status_apiview

# Routes for the applications API: a list endpoint (class-based view)
# and a status-update endpoint keyed by primary key (function-based view).
urlpatterns = [
    path('applications/', ApplicationListAPIView.as_view()),
    path('update-status/<int:pk>/', update_status_apiview),
]
from strawberry.tools import create_type

from .mutations.login import login
from .mutations.register import register
from .mutations.request_reset_password import request_reset_password
from .mutations.reset_password import reset_password
from .mutations.update_profile import update_profile

# Every mutation resolver exposed by the GraphQL schema's root Mutation type.
_mutation_fields = [
    login,
    register,
    request_reset_password,
    reset_password,
    update_profile,
]

Mutation = create_type("Mutation", _mutation_fields)
"""This module tests the main function""" from mock import patch from game import main, Game def test_main(): """This function tests the main function""" def new_play(self): """We need this function to test the main function""" self.boolean = False with patch.object(Game, 'play', new_play): result = main() assert result
import nibabel as nib
import numpy as np
import os, glob


def run_realign(emb, tar, firstpass=False):
    """Procrustes-align each embedding in ``emb`` onto the target ``tar``.

    For each embedding, computes the orthogonal transform (via SVD of
    tar.T @ embedding) that best maps it onto the target, and applies it.

    :param emb: iterable of 2-D arrays (one embedding per subject)
    :param tar: 2-D target array to align against
    :param firstpass: if True, the target itself is included as the first
        element of the returned list
    :return: list of aligned embeddings
    """
    realign = []
    if firstpass:
        realign.append(tar)
    for embedding in emb:
        u, s, v = np.linalg.svd(tar.T.dot(embedding), full_matrices=False)
        xfm = v.T.dot(u.T)  # closest orthogonal map embedding -> tar
        realign.append(embedding.dot(xfm))
    return realign


embeddings = []
subs = []

import pandas as pd

df = pd.read_csv('/network/lustre/iss01/home/daniel.margulies/data/lsd/subjects.txt')
sublist = np.asarray(df).flatten()
for s in sublist:
    # Bug fix: the original appended ``s`` to ``subs`` *before* attempting the
    # load, so a missing embedding file left subs/embeddings misaligned.
    # Also narrowed the bare ``except:`` to file-access errors.
    try:
        embedding = np.load('/network/lustre/iss01/home/daniel.margulies/data/lsd/embedding/embedding_dense_emb.%s.npy' % s)
    except (IOError, OSError):
        print(s)  # report subjects whose embedding file could not be read
    else:
        subs.append(s)
        embeddings.append(embedding)

# First pass: align everyone to the first subject; then iterate a few times
# aligning to the running group mean.
realigned = run_realign(embeddings[1:], embeddings[0], firstpass=True)
for i in range(5):
    realigned = run_realign(realigned, np.asarray(np.mean(realigned, axis=0).squeeze()))

from scipy.io import savemat

savemat('/network/lustre/iss01/home/daniel.margulies/data/lsd/group_embedding.mat',
        mdict={'emb': realigned, 'subs': subs})
from app import models, db, app

# Bulk-load users from a CSV-ish text file ("lastname,firstname" per line)
# and assign each a generated password of the form ctys<N><first 3 of lastname>.
# NOTE(review): passwords are stored as given to the model — confirm the User
# model hashes them.
user_file = "./users.txt"
base = "ctys"

with open(user_file) as handle:
    for num, line in enumerate(handle, start=1):
        last_name, first_name = line.strip().split(",")
        password = base + str(num) + last_name[:3]
        record = models.User(firstname=first_name,
                             lastname=last_name,
                             username=first_name.lower(),
                             password=password,
                             session=2,
                             year="2018")
        db.session.add(record)

db.session.commit()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json

import numpy as np

import common


def load_json(filename):
    """Read *filename* and return the parsed JSON document."""
    with open(filename, "r") as handle:
        return json.load(handle)


def load_data(filename):
    """Return (nsamples, mean, variance) from a convergence JSON file."""
    doc = load_json(filename)
    return doc["nsamples"], np.array(doc["mean"]), np.array(doc["variance"])


# convergence results channel flow
def convgMeanVar(dir_sol, dir_fig, lx, show_plot, save_plot, plot_id):
    """Plot the Cauchy error of the mean and variance against the number of
    Monte-Carlo samples for the channel-flow test case, with fitted
    convergence-rate reference lines."""
    fn = dir_sol + 'convergence_cf_lx' + str(lx) + '.json'
    M, meanL1Error, varL1Error = load_data(fn)

    # Straight-line fit in log-log space gives the empirical convergence rate.
    linfit_mean = np.polyfit(np.log(M), np.log(meanL1Error), 1)
    linfit_var = np.polyfit(np.log(M), np.log(varL1Error), 1)
    # Reference lines are shifted slightly below the data (factor exp(-0.5)).
    ref_mean = np.exp(np.polyval(linfit_mean, np.log(M)) - 0.5)
    ref_var = np.exp(np.polyval(linfit_var, np.log(M)) - 0.5)
    slope_mean = linfit_mean[0]
    slope_var = linfit_var[0]

    # Plot configuration consumed by common.plotLogLogData.
    myPlotDict = {
        'show_plot': show_plot,
        'save_plot': save_plot,
        'xlabel': r'Number of samples $M$ [log]',
        'legend_loc': 'upper right',
        'data_markers': ['bo-', 'rs-'],
        'data_labels': ['mean', 'variance'],
        'ylim': [2e-5, 1e-2],
        'ref_data_markers': ['b-.', 'r--'],
        'title': '',
        'ylabel': 'Cauchy error',
        'out_filename': dir_fig + 'convg_uq_cf_lx' + str(lx) + '.pdf',
        'xlim': [20, 600],
        'ref_data_labels': ['$O(M^{%4.2f})$' % slope_mean,
                            '$O(M^{%4.2f})$' % slope_var],
    }
    common.plotLogLogData([M, M], [meanL1Error, varL1Error],
                          [ref_mean, ref_var], myPlotDict)


if __name__ == "__main__":
    show_plot = True
    save_plot = True
    # Re = 1600 is the alternative configuration
    Re = 3200
    dir_sol = '../../output/uq_pincompNS_cf/Re' + str(Re) + '/convergence/'
    dir_fig = '../../figures/uq_incompNS/uq_cf/Re' + str(Re) + '/'
    lx = 3
    convgMeanVar(dir_sol, dir_fig, lx, show_plot, save_plot, 1)

# End of file
import struct

import mmtf
import mmtf.utils.constants


def parse_header(input_array):
    """Split the 12-byte MMTF codec header off *input_array*.

    :param input_array: the encoded array to parse, header first
    :return: the codec id, the length of the decoded array, the codec
        parameter, and the remainder of the array (the payload)"""
    fmt = mmtf.utils.constants.NUM_DICT[4]
    codec = struct.unpack(fmt, input_array[0:4])[0]
    length = struct.unpack(fmt, input_array[4:8])[0]
    param = struct.unpack(fmt, input_array[8:12])[0]
    return codec, length, param, input_array[12:]


def add_header(input_array, codec, length, param):
    """Prepend the 12-byte MMTF codec header to an encoded array.

    :param input_array: the encoded array to add the header to
    :param codec: the codec id being used
    :param length: the length of the decoded array
    :param param: the codec parameter to store in the header
    :return: the encoded byte array with the header prepended"""
    fmt = mmtf.utils.constants.NUM_DICT[4]
    header = (struct.pack(fmt, codec)
              + struct.pack(fmt, length)
              + struct.pack(fmt, param))
    return header + input_array
import requests


def get_proxy():
    """Fetch one https-capable proxy entry from the local proxy-pool service."""
    return requests.get("http://127.0.0.1:8010/get?type=https").json()


def delete_proxy(proxy):
    """Remove a dead proxy from the pool."""
    requests.get("http://127.0.0.1:8010/delete/?proxy={}".format(proxy))


# your spider code
def getHtml():
    """Fetch the target page through a pooled proxy, retrying up to 5 times.

    :return: the requests Response on success, or None if every attempt failed
        (in which case the proxy is evicted from the pool).
    """
    retry_count = 5
    proxy = get_proxy().get("proxy")
    while retry_count > 0:
        try:
            html = requests.get('https://www.example.com',
                                proxies={"all": "http://{}".format(proxy)})  # 使用代理访问
            return html
        except Exception:
            retry_count -= 1
    # 删除代理池中代理 — all retries failed, evict the dead proxy
    delete_proxy(proxy)
    return None


if __name__ == '__main__':
    success_proxy_count = 0
    for i in range(0, 1):
        html = getHtml()
        # Bug fix: the original set ``html.encoding`` before checking for None,
        # so a completely failed fetch crashed with AttributeError instead of
        # reaching the status-code report below.
        if html is None:
            print("try times %d,status code: %d" % (i, 4003))
            continue
        html.encoding = html.apparent_encoding
        print(html.text)
        print("try times %d,status code: %d" % (i, html.status_code))
        if html.status_code == 200:
            success_proxy_count += 1
    print(success_proxy_count)
MODEL_PATH = "yolo-coco" MIN_CONF = 0.3 NMS_THRESH = 0.4 USE_GPU = False MIN_DISTANCE = 50 from detection import detect_people from scipy.spatial import distance as dist import numpy as np import argparse import imutils import time import cv2 import os start_time = time.time() ap = argparse.ArgumentParser() ap.add_argument("-i", "--input", type=str, default="", help="path to (optional) input video file") ap.add_argument("-o", "--output", type=str, default="", help="path to (optional) output video file") ap.add_argument("-d", "--display", type=int, default=0, help="whether or not output frame should be displayed") args = vars(ap.parse_args()) labelsPath = os.path.sep.join([MODEL_PATH, "coco.names"]) LABELS = open(labelsPath).read().strip().split("\n") weightsPath = os.path.sep.join([MODEL_PATH, "yolov3.weights"]) configPath = os.path.sep.join([MODEL_PATH, "yolov3.cfg"]) print("[INFO] loading YOLO from disk...") net = cv2.dnn.readNetFromDarknet(configPath, weightsPath) if USE_GPU: print("[INFO] setting preferable backend and target to CUDA...") net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA) net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA) ln = net.getLayerNames() ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()] print("[INFO] accessing video stream...") vs = cv2.VideoCapture(args["input"] if args["input"] else 0) writer = None while True: (grabbed, frame) = vs.read() if not grabbed: break frame = imutils.resize(frame, width=700) results = detect_people(frame, net, ln, personIdx=LABELS.index("person")) violate = set() abnormal = set() if len(results) >= 2: centroids = np.array([r[2] for r in results]) D = dist.cdist(centroids, centroids, metric="euclidean") for i in range(0, D.shape[0]): for j in range(i + 1, D.shape[1]): if D[i, j] < MIN_DISTANCE: violate.add(i) violate.add(j) for (i, (prob, bbox, centroid)) in enumerate(results): (startX, startY, endX, endY) = bbox (cX, cY) = centroid color = (0, 255, 0) if i in violate: color = (0, 0, 255) 
cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2) cv2.circle(frame, (cX, cY), 5, color, 1) text = "Social Distancing Violations: {}".format(len(violate)) cv2.putText(frame, text, (10, frame.shape[0] - 55), cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 2) if args["display"] > 0: cv2.imshow("Frame", frame) key = cv2.waitKey(1) & 0xFF if key == ord("q"): break if args["output"] != "" and writer is None: fourcc = cv2.VideoWriter_fourcc(*"MJPG") writer = cv2.VideoWriter(args["output"], fourcc, 2, (frame.shape[1], frame.shape[0]), True) # if args["input"]: # video = cv2.VideoCapture("teste2.avi"); # (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.') # if int(major_ver) < 3 : # fps = video.get(cv2.cv.CV_CAP_PROP_FPS) # print ("Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps)) # else : # fps = video.get(cv2.CAP_PROP_FPS) # print ("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps)) # video.release() # if not args["input"]: # # Start default camera # video = cv2.VideoCapture(0); # # Find OpenCV version # (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.') # # With webcam get(CV_CAP_PROP_FPS) does not work. # # Let's see for ourselves. 
# if int(major_ver) < 3 : # fps = video.get(cv2.cv.CV_CAP_PROP_FPS) # print("Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps)) # else : # fps = video.get(cv2.CAP_PROP_FPS) # print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps)) # # Number of frames to capture # num_frames = 120; # print("Capturing {0} frames".format(num_frames)) # # Start time # start = time.time() # # Grab a few frames # for i in range(0, num_frames) : # ret, frame = video.read() # # End time # end = time.time() # # Time elapsed # seconds = end - start # print ("Time taken : {0} seconds".format(seconds)) # # Calculate frames per second # fps = num_frames / seconds # print("Estimated frames per second : {0}".format(fps)) # # Release video # video.release() if writer is not None: writer.write(frame)
#!/usr/bin/env python """ extractORFs.py <6 frame translation> Author: Tony Papenfuss Date: Wed Aug 23 08:52:58 EST 2006 """ import os, sys import re, copy import fasta, sequence pattern = re.compile('[\*|X{200,}]') minLen = 20 i = 0 writer = fasta.MfaWriter('ORFs.fa') filename = sys.argv[1] header,dna = fasta.load(filename) header = header.strip() orfIter = sequence.extractOrfsIter(dna, minLen=minLen, pattern=pattern) for i,gStart,gEnd,orf in orfIter: h = '%s.%i.%i-%i Length=%i' % (header,i,gStart,gEnd,len(orf)) writer.write(h, orf) fasta.pretty(h, orf) if gStart<gEnd: s = dna[gStart-1:gEnd] print gStart, gEnd, len(s), len(s) % 3==0 print sequence.codons(s, remainder=True) print sequence.translate(s) else: gStart,gEnd = gEnd,gStart s = dna[gStart-1:gEnd] s = sequence.reverseComplement(s) print gStart, gEnd, len(s), len(s) % 3==0 print sequence.codons(s, remainder=True) print sequence.translate(s) print writer.close()
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords
import nltk


def do_numpy_stuff():
    """Smoke-test helper: return sqrt(5) computed by numpy."""
    return np.sqrt(5)


def tokenize(text):
    """Lowercase *text*, strip apostrophes/periods, drop English stopwords,
    and lemmatize the remaining whitespace-separated tokens."""
    wordnet = WordNetLemmatizer()
    words = nltk.WhitespaceTokenizer().tokenize(text.lower())
    contractions_removed = [fix_punctuation(word) for word in words]
    stopped_words = [word for word in contractions_removed
                     if word not in stopwords.words('english')]
    return [wordnet.lemmatize(word) for word in stopped_words]


def fix_punctuation(word):
    """Remove apostrophes and periods from a single token."""
    word = word.replace('\'', '')
    word = word.replace('.', '')
    return word


def count_vectorizer(doc):
    """Return a dense bag-of-words count matrix for the documents in *doc*."""
    vect = CountVectorizer(stop_words='english',
                           tokenizer=nltk.WhitespaceTokenizer().tokenize)
    word_counts = vect.fit_transform(doc)
    word_array = word_counts.toarray()
    return word_array


def tfidf(all_text):
    """Return a dense TF-IDF matrix for the documents in *all_text*."""
    vect = TfidfVectorizer(stop_words='english', tokenizer=tokenize)
    # Bug fix: the original called vect.fit_transform(text) — an undefined
    # name — instead of the function's own parameter, raising NameError.
    tfid = vect.fit_transform(all_text)
    tfid_array = tfid.toarray()
    return tfid_array
# -*- coding: utf-8 -*-
"""
Runs the simulation with the right variables and handles output.
"""
import helpers.runsimulation as run
import numpy as np
import matplotlib.pyplot as plt
import helpers.muaanalytical as muaAna


def Rvsr():
    """Run the simulation once at a fixed absorption coefficient and
    produce the reflectance-vs-radius output."""
    mua = 10.
    simulation = run.runSimulation(mua, 0.0, "Rvsr")
    simulation.RvsrOutput(mua)


def RvsMua():
    """Sweep the absorption coefficient and track reflectance at a fixed
    radius, alongside the analytical solution for comparison."""
    r = 0.3
    mua_values = []
    reflectances = []
    analytical = []
    for mua in np.arange(0.1, 7, 0.4):
        mua = round(mua, 1)
        print(mua)
        # Monte-Carlo run for this absorption coefficient.
        simulation = run.runSimulation(mua, r, "RvsMua")
        # Closed-form reference value at the same (mua, r).
        mua_values.append(mua)
        analytical.append(muaAna.reflectance(mua, r))
        reflectances.append(simulation.Rr)
    simulation.RvsMua(mua_values, reflectances, analytical)


def savePhotons():
    """Run the simulation and persist reflected photons to a CSV file."""
    r = 0.3
    for mua in np.arange(0.35, 0.351, 0.05):
        mua = round(mua, 2)
        print(mua)
        run.runSimulation(mua, r, "savePhotons")


def RvsrThree():
    """Run the simulation at three absorption coefficients and overlay
    all three reflectance-vs-radius curves in a single log plot."""
    r = 0.
    # Collect (radius list, reflectance) pairs for mua = 0.1, 1.0, 2.0.
    curves = []
    for mua in (0.1, 1., 2.):
        simulation = run.runSimulation(mua, r, "Rvsr")
        curves.append(simulation.RvsrOutput(mua))

    plt.figure()
    for (radii, reflectance), marker in zip(curves, ('bo', 'go', 'ro')):
        plt.plot(radii, reflectance, marker)
    plt.yscale("log")
    plt.xlim(0.01, 0.35)
    plt.ylim(10**-2, 10**2)
    plt.title("Reflectance as a function of radius")
    plt.xlabel("r (cm)")
    plt.ylabel("R (cm^-2)")
    plt.show()
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from requests import Session
from threading import Thread
from threadutil import run_in_main_thread
from time import sleep

# Minimal HTTP-polling chat client with a Qt GUI.
name = input("Please enter your name: ")
chat_url = "https://build-system.fman.io/chat"
server = Session()  # one persistent HTTP session for all requests

# GUI:
app = QApplication([])
text_area = QPlainTextEdit()
text_area.setFocusPolicy(Qt.NoFocus)  # keep keyboard focus on the input line
message = QLineEdit()
layout = QVBoxLayout()
layout.addWidget(text_area)
layout.addWidget(message)
window = QWidget()
window.setLayout(layout)
window.show()

# Qt widgets may only be touched from the main thread; this wrapper marshals
# appendPlainText calls over from the background polling thread.
append_message = run_in_main_thread(text_area.appendPlainText)

def fetch_new_messages():
    """Poll the chat endpoint twice per second and append any returned text."""
    while True:
        response = server.get(chat_url).text
        if response:
            append_message(response)
        sleep(.5)

def send_message():
    """POST the current input line to the server, then clear the input."""
    server.post(chat_url, {"name": name, "message": message.text()})
    message.clear()

# Signals:
message.returnPressed.connect(send_message)

# daemon=True: the poller must not keep the process alive after the GUI exits.
thread = Thread(target=fetch_new_messages, daemon=True)
thread.start()

app.exec_()
import discord
from discord.ext import commands
import random
import aiohttp
import os
import asyncio
from assets import quotes

# Canned reaction templates. Each line is formatted with
# (author display name, target display name).
eat_reactions = ['''_{0}_, you try to eat _{1}_, but you can\'t do it.' So you, leave with the taste of failure hanging in your mouth''',
                 '_{0}_, you try to gobble up _{1}_. They prove to be a tight fit, but you manage to eat them.',
                 '_{0}_, you advance toward _{1}_, but you turn back and run, because they want to eat you too.',
                 '_{0}_, you finish eating _{1}_, and have a long nap, the sign of a good meal.']

pet_reactions = ['_{0}_, you pet _{1}_, as they smile from your petting.',
                 '_{0}_, you try to pet _{1}_, but they run away, scared of your affection.',
                 '_{0}_, you pet _{1}_. They are happy and now they want to pet you too.']

drink_reactions = ['_{0}_, you pierce {1} with a straw, as they cry out in pain.',
                   '_{0}_, you try to drink _{1}_, but you realize they aren\'t liquid.',
                   '_{0}_, you try to drink _{1}_, but they have a mirror. So now you\'re drinking yourself.']

hug_reactions = ['_{0}_, you try to hug _{1}_, but they run away because they don\'t understand your affection.',
                 '_{0}_, you hug _{1}_. and they smile, because they didn\'t know they needed it.',
                 '_{0}_, you hug _{1}_, and they hug you back, the sign of a good friendship.',
                 '_{0}_, you try to hug _{1}_, but they pull out a knife because they think you were gonna mug them.',
                 ]

fart_reactions = ['*farting noises*', 'Toot', '*Blerrrtttt*', '**no.**', '_ew_']


def eat_func(author, user, bot):
    """Return the 'eat' reaction line for *author* targeting *user*."""
    if user.id == bot.user.id:
        return '''For the record, I **DO NOT** appreciate being eaten. Even though I am digital and you would probably get electrocuted.'''
    elif not author == user:
        return random.choice(eat_reactions).format(author.display_name, user.display_name)
    else:
        return 'You try to eat yourself, but fail miserably'


def pet_func(author, user, bot):
    """Return the 'pet' reaction line for *author* targeting *user*."""
    if user.id == bot.user.id:
        return 'Well, what can I say? I do like people petting me :)'
    elif not author == user:
        return random.choice(pet_reactions).format(author.display_name, user.display_name)
    else:
        return 'You pet yourself. I feel you, mate'


def drink_func(author, user, bot):
    """Return the 'drink' reaction line for *author* targeting *user*."""
    if user.id == bot.user.id:
        return 'You try to drink me, but you can\'t, because I\'m digital!'
    elif not author == user:
        return random.choice(drink_reactions).format(author.display_name, user.display_name)
    else:
        return 'You pierce yourself with a straw. Not surprisingly, it hurts.'


def fart_reaction():
    """Return a random fart reaction string."""
    return random.choice(fart_reactions)


def hug_func(author, user, bot, actual_user=None):
    """Return the 'hug' reaction line for *author* targeting *user*.

    Bug fix: the cog command invokes this with three arguments, but the
    original signature required four positionals, so every ``!hug`` raised
    TypeError. ``actual_user`` now defaults to ``user``, which keeps the
    old four-argument calls working unchanged.
    """
    if actual_user is None:
        actual_user = user
    if actual_user.id == bot.user.id:
        return 'Even though I\'m digital, I do appreciate hugs :)'
    elif not author == user:
        return random.choice(hug_reactions).format(author.display_name, user.display_name)
    else:
        return 'You try to hug yourself, I feel you. Mind if I give you a hug?'


class Funzies(commands.Cog, description='Fun commands for everyone to try out'):
    """Cog bundling the bot's joke/social commands."""

    def __init__(self, bot):
        self.bot = bot
        self.hello_last = None  # user id of whoever last used !hello

    @commands.Cog.listener()
    async def on_message(self, message):
        if self.bot.user.mentioned_in(message):
            # check for mentions, and react with the wave emoji
            await message.add_reaction('👋️')

    @commands.command(name='eat', description='Eats the person, I guess')
    async def eat_func_actual(self, ctx, user: discord.Member):
        await ctx.send(eat_func(ctx.author, user, self.bot))

    @commands.command(name='drink', description='Beware, you might spill the user you\'re trying to drink.')
    async def drink_func(self, ctx, user: discord.Member):
        await ctx.send(drink_func(ctx.author, user, self.bot))

    @commands.command(name='hug', description='Try hugging yourself.')
    async def hug_func(self, ctx, user: discord.Member):
        await ctx.send(hug_func(ctx.author, user, self.bot))

    @commands.command(name='pet', description='Pets whoever you mention. Exceptions may exist.')
    async def pet_func(self, ctx, user: discord.Member):
        await ctx.send(pet_func(ctx.author, user, self.bot))

    @commands.command(name='fart', description='Does this really need a description?')
    async def fart_func(self, ctx):
        await ctx.send(fart_reaction())

    @commands.command(name='art', description='You might think this uses a machine learning algorithm, '
                                              'but no.\nIt just gets a random image from '
                                              '__[this website](https://thisartworkdoesnotexist.com)__')
    async def art_command(self, ctx):
        async with aiohttp.ClientSession() as session:
            async with session.get(f"https://thisartworkdoesnotexist.com/") as response:
                f = await response.content.read()
        if not os.path.exists('./storage/art.png'):
            with open('./storage/art.png', 'w') as imageFile:
                print('created file art.png inside the storage folder')  # create file if not present
        with open('./storage/art.png', 'wb') as fl:
            fl.write(f)  # f is already in binary, so don't need to decode
        # NOTE(review): the file handle stays open until discord.File is done
        # sending; discord.File also accepts a path directly — confirm before
        # refactoring.
        fl = open('./storage/art.png', 'rb')
        pic = discord.File(fl)
        await ctx.send(file=pic)

    @commands.command()
    async def inspire(self, ctx):
        await ctx.send(quotes.get_quote())

    @commands.command(name='hello', description='Says hello, and remembers nothing after that. I\'m kidding, '
                                                'it knows who last said hello to it.')
    async def hello(self, ctx, *, some_text=None):
        await ctx.send(f'Hello, {ctx.author.display_name}!')
        if some_text is not None:
            await ctx.send(f'I don\'t understand why you say "{some_text}". Doesn\'t make sense.')
        if self.hello_last == ctx.author.id:
            await ctx.send('This does feel familiar, though')
        self.hello_last = ctx.author.id  # saves the last user's id to be used again

    @commands.command(name='sendemoji', description='Sends the emoji, and that\'s it.\n'
                                                    'It can send animated emojis too!\n'
                                                    'Note: Only guild-only emojis are taken into account.')
    async def emoji_command(self, ctx, emoji_name):
        for x in ctx.guild.emojis:
            if emoji_name == x.name:
                return await ctx.reply(str(x))
        await ctx.send(f'No Guild-only emoji called {emoji_name} found.')

    @commands.command(name='selfdestruct', description='**DO NOT USE THIS COMMAND**')
    async def selfdestruct_command(self, ctx):
        # Edit the message down one block per second, then go boom.
        msg_content = "███"
        message = await ctx.send(f"{msg_content}")
        for x in range(2):
            await asyncio.sleep(1)
            msg_content = msg_content[:-1]
            await message.edit(content=f'{msg_content}')
        await asyncio.sleep(1)
        await message.edit(content='**Kaboom!**')


def setup(bot):
    bot.add_cog(Funzies(bot))