content stringlengths 5 1.05M |
|---|
from typing import List, Optional, Union
from fedot.core.dag.graph_node import GraphNode
from fedot.core.data.data import InputData, OutputData
from fedot.core.log import Log, default_log
from fedot.core.operations.factory import OperationFactory
from fedot.core.operations.operation import Operation, get_default_params
class Node(GraphNode):
    """
    Base class for Node definition in Pipeline structure

    :param nodes_from: parent nodes which information comes from
    :param operation_type: str type of the operation defined in operation repository
        the custom prefix can be added after / (to highlight the specific node)
        The prefix will be ignored at Implementation stage
    :param log: Log object to record messages
    """

    def __init__(self, nodes_from: Optional[List['Node']],
                 operation_type: Optional[Union[str, 'Operation']] = None,
                 log: Log = None, **kwargs):
        passed_content = kwargs.get('content')
        if passed_content:
            # Define operation, based on content dictionary
            operation = self._process_content_init(passed_content)
        else:
            # There is no content for node
            operation = self._process_direct_init(operation_type)
        # GraphNode keeps the operation object under 'name' and its params separately
        super().__init__(content={'name': operation,
                                  'params': operation.params}, nodes_from=nodes_from)
        if not log:
            self.log = default_log(__name__)
        else:
            self.log = log
        # Cached fitted operation; set on the first fit(), cleared by unfit()
        self._fitted_operation = None
        self.rating = None

    def _process_content_init(self, passed_content: dict) -> Operation:
        """ Updating content in the node """
        if isinstance(passed_content['name'], str):
            # Need to convert name of operation into operation class object
            operation_factory = OperationFactory(operation_name=passed_content['name'])
            operation = operation_factory.get_operation()
            passed_content.update({'name': operation})
        else:
            # An Operation instance was passed directly in the content
            operation = passed_content['name']
        self.content = passed_content
        return operation

    @staticmethod
    def _process_direct_init(operation_type) -> Operation:
        """ Define operation based on direct operation_type without defining content in the node """
        if not operation_type:
            raise ValueError('Operation is not defined in the node')
        if not isinstance(operation_type, str):
            # AtomizedModel
            operation = operation_type
        else:
            # Define appropriate operation or data operation
            operation_factory = OperationFactory(operation_name=operation_type)
            operation = operation_factory.get_operation()
        return operation

    # wrappers for 'operation' field from GraphNode class
    @property
    def operation(self):
        """Operation object stored in the node content."""
        return self.content['name']

    @operation.setter
    def operation(self, value):
        self.content.update({'name': value})

    @property
    def fitted_operation(self):
        """Fitted operation if the node was fitted, otherwise None."""
        if hasattr(self, '_fitted_operation'):
            return self._fitted_operation
        else:
            return None

    @fitted_operation.setter
    def fitted_operation(self, value):
        # Assigning None removes the cached attribute entirely (see getter above)
        if value is None:
            if hasattr(self, '_fitted_operation'):
                del self._fitted_operation
        else:
            self._fitted_operation = value

    def unfit(self):
        """Reset the node to the unfitted state."""
        self.fitted_operation = None

    def fit(self, input_data: InputData) -> OutputData:
        """
        Run training process in the node

        :param input_data: data used for operation training
        """
        if self.fitted_operation is None:
            self.fitted_operation, operation_predict = self.operation.fit(data=input_data,
                                                                          is_fit_pipeline_stage=True)
        else:
            # Already fitted: reuse the cached operation and only predict
            operation_predict = self.operation.predict(fitted_operation=self.fitted_operation,
                                                       data=input_data,
                                                       is_fit_pipeline_stage=True)
        return operation_predict

    def predict(self, input_data: InputData, output_mode: str = 'default') -> OutputData:
        """
        Run prediction process in the node

        :param input_data: data used for prediction
        :param output_mode: desired output for operations (e.g. labels, probs, full_probs)
        """
        operation_predict = self.operation.predict(fitted_operation=self.fitted_operation,
                                                   data=input_data,
                                                   output_mode=output_mode,
                                                   is_fit_pipeline_stage=False)
        return operation_predict

    @property
    def custom_params(self) -> dict:
        """Parameters of the node operation (from the fitted operation when available)."""
        # Operation is not fitted yet
        if self.fitted_operation is None:
            # NOTE(review): 'get_params' is read as a property here but called as a
            # method on the fitted operation below — confirm both APIs are intended.
            return self.operation.get_params
        else:
            try:
                return self.fitted_operation.get_params()
            except Exception as ex:
                self.log.info(f'Operation get params failed due to: {ex}')
                return {}

    @custom_params.setter
    def custom_params(self, params):
        if params:
            # Complete the dictionary if it is incomplete
            default_params = get_default_params(self.operation.operation_type)
            if default_params is not None:
                # Passed params take precedence over the defaults
                params = {**default_params, **params}
            self.operation.params = params

    def __str__(self):
        return str(self.operation.operation_type)
class PrimaryNode(Node):
    """
    The class defines the interface of Primary nodes where initial task data is located

    :param operation_type: str type of the operation defined in operation repository
    :param node_data: dictionary with InputData for fit and predict stage
    :param kwargs: optional arguments (i.e. logger)
    """

    def __init__(self, operation_type: Optional[Union[str, 'Operation']] = None, node_data: dict = None, **kwargs):
        # Primary nodes never have parents, so drop any 'nodes_from' that was passed
        if 'nodes_from' in kwargs:
            del kwargs['nodes_from']
        super().__init__(nodes_from=None, operation_type=operation_type, **kwargs)
        if node_data is None:
            self._node_data = {}
            self.direct_set = False
        else:
            self._node_data = node_data
            # Was the data passed directly to the node or not
            self.direct_set = True

    def fit(self, input_data: InputData, **kwargs) -> OutputData:
        """
        Fit the operation located in the primary node

        :param input_data: data used for operation training
        """
        self.log.ext_debug(f'Trying to fit primary node with operation: {self.operation}')
        if self.direct_set:
            # Data was attached to the node directly — ignore the passed input
            input_data = self.node_data
        else:
            # Remember the incoming data for later retrieval via get_data_from_node
            self.node_data = input_data
        return super().fit(input_data)

    def unfit(self):
        """Reset the fitted operation and drop the stored node data."""
        self.fitted_operation = None
        if hasattr(self, 'node_data'):
            self.node_data = None

    def predict(self, input_data: InputData,
                output_mode: str = 'default') -> OutputData:
        """
        Predict using the operation located in the primary node

        :param input_data: data used for prediction
        :param output_mode: desired output for operations (e.g. labels, probs, full_probs)
        """
        self.log.ext_debug(f'Predict in primary node by operation: {self.operation}')
        if self.direct_set:
            input_data = self.node_data
        else:
            self.node_data = input_data
        return super().predict(input_data, output_mode)

    def get_data_from_node(self):
        """ Method returns data if the data was set to the nodes directly """
        return self.node_data

    @property
    def node_data(self):
        """Stored InputData, or an empty dict when nothing is stored."""
        if hasattr(self, '_node_data'):
            return self._node_data
        else:
            return {}

    @node_data.setter
    def node_data(self, value):
        # Assigning None removes the stored attribute entirely (see getter above)
        if value is None:
            if hasattr(self, '_node_data'):
                del self._node_data
        else:
            self._node_data = value
class SecondaryNode(Node):
    """
    Interface for secondary nodes, i.e. nodes that transform the data flow
    produced by their parent nodes inside a Pipeline.

    :param operation_type: str type of the operation defined in operation repository
    :param nodes_from: parent nodes where data comes from
    :param kwargs: optional arguments (i.e. logger)
    """

    def __init__(self, operation_type: Optional[Union[str, 'Operation']] = None,
                 nodes_from: Optional[List['Node']] = None, **kwargs):
        # A secondary node always keeps a list of parents, never None
        parents = [] if nodes_from is None else nodes_from
        super().__init__(nodes_from=parents, operation_type=operation_type, **kwargs)

    def fit(self, input_data: InputData, **kwargs) -> OutputData:
        """Fit the operation located in the secondary node.

        :param input_data: data used for operation training
        """
        self.log.ext_debug(f'Trying to fit secondary node with operation: {self.operation}')
        combined = self._input_from_parents(input_data=input_data, parent_operation='fit')
        return super().fit(input_data=combined)

    def predict(self, input_data: InputData, output_mode: str = 'default') -> OutputData:
        """Predict using the operation located in the secondary node.

        :param input_data: data used for prediction
        :param output_mode: desired output for operations (e.g. labels, probs, full_probs)
        """
        self.log.ext_debug(f'Obtain prediction in secondary node with operation: {self.operation}')
        combined = self._input_from_parents(input_data=input_data,
                                            parent_operation='predict')
        return super().predict(input_data=combined, output_mode=output_mode)

    def _input_from_parents(self, input_data: InputData,
                            parent_operation: str) -> InputData:
        """Run the parent nodes and merge their outputs into a single InputData."""
        if len(self.nodes_from) == 0:
            raise ValueError('No parent nodes found')
        self.log.ext_debug(f'Fit all parent nodes in secondary node with operation: {self.operation}')
        ordered_parents = self._nodes_from_with_fixed_order()
        # The combined target is recomputed by from_predictions, so it is unused here
        parent_results, _ = _combine_parents(ordered_parents, input_data,
                                             parent_operation)
        return InputData.from_predictions(outputs=parent_results)

    def _nodes_from_with_fixed_order(self):
        """Return parent nodes in a deterministic order (by descriptive_id)."""
        if self.nodes_from is None:
            return None
        return sorted(self.nodes_from, key=lambda parent: parent.descriptive_id)
def _combine_parents(parent_nodes: List[Node],
                     input_data: InputData,
                     parent_operation: str):
    """
    Method for combining predictions from parent node or nodes

    :param parent_nodes: list of parent nodes, from which predictions will
        be combined
    :param input_data: input data from pipeline abstraction (source input data)
    :param parent_operation: name of parent operation (fit or predict)
    :return parent_results: list with OutputData from parent nodes
    :return target: target for final pipeline prediction
    :raises NotImplementedError: if parent_operation is neither 'fit' nor 'predict'
    :raises ValueError: if the target cannot be determined (no input data and
        no parent outputs)
    """
    target = None
    if input_data is not None:
        # InputData was set to pipeline
        target = input_data.target

    parent_results = []
    prediction = None
    for parent in parent_nodes:
        # Dispatch once per parent; previously the append was duplicated per branch
        if parent_operation == 'predict':
            prediction = parent.predict(input_data=input_data)
        elif parent_operation == 'fit':
            prediction = parent.fit(input_data=input_data)
        else:
            raise NotImplementedError(f'Unsupported parent operation: {parent_operation!r}')
        parent_results.append(prediction)

    if input_data is None:
        # InputData was set to primary nodes: take the target from the last
        # parent output (previously this raised NameError when no parents ran)
        if prediction is None:
            raise ValueError('Cannot determine target: no input data and no parent outputs')
        target = prediction.target
    return parent_results, target
|
from django.urls import path
from .views import email_list_signup, contact_form, subscribe
# URL routes for the newsletter/contact views.
urlpatterns = [
    # path('email-signup/', email_list_signup, name='email-list-signup'),
    # NOTE(review): the 'subscribe' route is wired to email_list_signup while the
    # imported 'subscribe' view is unused — confirm this mapping is intentional.
    path('subscribe/', email_list_signup, name='subscribe'),
    path('contact/', contact_form, name='contact')
]
|
import code.main as cm
def test_correctness():
    """inc(3) must yield exactly 4."""
    expected = 4
    assert cm.inc(3) == expected
def test_incorrectness():
    """inc(3) must not equal 5 (guards against an off-by-two increment)."""
    # '!=' is the idiomatic form of 'not ... == ...'
    assert cm.inc(3) != 5
from Model.filemodel import file_model
import tkinter.messagebox
from Encryption.crypto import encrypter
from tkinter import *
from tkinter.filedialog import askdirectory, askopenfilenames
from GUI.ContextMenuListBox import ContextMenuListBox
from tkinter import ttk
from os.path import *
from os import path
class MainWindow():
    """Main tkinter window of the FileLock application.

    Hosts two notebook tabs: one for selecting and encrypting files, one for
    listing previously encrypted files and decrypting them to a chosen folder.
    All crypto work is delegated to the injected `encrypter`.
    """

    # NOTE(review): class-level mutable default ([]) is shared across instances;
    # fine while only one MainWindow exists — confirm single-instance usage.
    __crypter: encrypter = None
    __decrypt_files: list[file_model] = []

    def __init__(self, crypter: encrypter):
        """Build the root window, set icon/geometry and create both tabs."""
        self.root = Tk()
        self.root.wm_title("FileLock")
        self.set_geometry_main_window()
        # Icon lives two directory levels above this file, in Images/
        icon_path = join(dirname(dirname(realpath(__file__))), "Images", "applicationicon.ico")
        self.root.iconbitmap(icon_path)
        self.__crypter = crypter
        self.tab_control = ttk.Notebook(self.root)
        self.encrypt_tab = Frame(self.tab_control)
        self.decrypt_tab = Frame(self.tab_control)
        self.init_encrypt_tab()
        self.init_decrypt_tab()
        self.tab_control.add(self.encrypt_tab, text='Encrypt')
        self.tab_control.add(self.decrypt_tab, text='Decrypt')
        self.tab_control.pack(expand=1, fill="both")

    def set_geometry_main_window(self):
        """Size the window to 500x500 and center it on the screen.

        Window frame/titlebar sizes are measured from the root window so the
        centering accounts for window decorations.
        """
        application_width = 500
        frame_width = self.root.winfo_rootx() - self.root.winfo_x()
        window_width = application_width + 2 * frame_width
        application_height = 500
        titlebar_height = self.root.winfo_rooty() - self.root.winfo_y()
        window_height = application_height + titlebar_height + frame_width
        # Center the decorated window on the screen
        x = self.root.winfo_screenwidth() // 2 - window_width // 2
        y = self.root.winfo_screenheight() // 2 - window_height // 2
        self.root.geometry('{}x{}+{}+{}'.format(application_width, application_height, x, y))

    def start(self):
        """Enter the tkinter main loop (blocks until the window closes)."""
        self.root.mainloop()

    def get_encrypted_files(self):
        """Reload the encrypted-file list from the crypter into the decrypt tab."""
        self.__decrypt_files = list(self.__crypter.read_files())
        self.listbox_decrypt_files.delete('0', 'end')
        for element in self.__decrypt_files:
            self.listbox_decrypt_files.insert("end", element.FullPath)

    def open_file_dialog(self):
        """Let the user pick files and append them to the encrypt listbox."""
        filenames = askopenfilenames()
        for element in filenames:
            self.listbox_encrypt_files.insert("end", element)

    def button_encryption_click(self):
        """Encrypt every selected file; abort with an error dialog on failure."""
        selected_items = self.listbox_encrypt_files.curselection()
        if len(selected_items) == 0:
            tkinter.messagebox.showerror("Encryption failed", "No files selected.")
            return
        counter = 0
        # Combobox index 0 = "Simple", 1 = "Extended" encryption mode
        simple_mode = True
        if self.encryption_type_combobox.current() == 1:
            simple_mode = False
        for selected_item in selected_items:
            try:
                if simple_mode:
                    self.__crypter.encrypt(file_model(self.listbox_encrypt_files.get(selected_item), -1))
                else:
                    self.__crypter.encrypt2(file_model(self.listbox_encrypt_files.get(selected_item), -1))
            except Exception:
                tkinter.messagebox.showerror("Encryption failed", "The encryption failed.")
                return
            counter = counter + 1
            # Update the progress label after each file
            self.label_encrypted_files_counter_text.set("{} of {} files were encrypted".format(counter, len(selected_items)))
            self.listbox_encrypt_files.update()
        tkinter.messagebox.showinfo("Encryption succeeded", "All files were encrypted.")
        # Refresh the decrypt tab so the newly encrypted files appear there
        self.get_encrypted_files()

    def button_decryption_click(self):
        """Decrypt every selected file into a user-chosen directory."""
        selected_items = self.listbox_decrypt_files.curselection()
        if len(selected_items) == 0:
            tkinter.messagebox.showerror("Decryption failed", "No files selected.")
            return
        counter = 0
        save_directory = askdirectory()
        if save_directory == '':
            tkinter.messagebox.showerror("Decryption failed", "No save directory was selected.")
            return
        for selected_item in selected_items:
            # Match the listbox entry back to its file_model by full path
            selected_file_model = next((x for x in self.__decrypt_files if x.FullPath == self.listbox_decrypt_files.get(selected_item)), None)
            try:
                # EncryptionType 1 = simple mode, anything else = extended mode
                if selected_file_model.EncryptionType == 1:
                    data = self.__crypter.decrypt(selected_file_model)
                else:
                    data = self.__crypter.decrypt2(selected_file_model)
                fileName = path.basename(selected_file_model.FullPath)
                combinedPath = join(save_directory, fileName)
                self.__crypter.write_bytes_to_new_file(combinedPath, data)
            except Exception:
                tkinter.messagebox.showerror("Decryption failed", "The decryption failed.")
                return
            counter = counter + 1
            self.label_decrypted_files_counter_text.set("{} of {} files were decrypted".format(counter, len(selected_items)))
            self.listbox_decrypt_files.update()
        tkinter.messagebox.showinfo("Decryption succeeded", "All files were decrypted.")

    def init_encrypt_tab(self):
        """Lay out the encrypt tab: file picker, mode combobox, listbox, button."""
        Grid.columnconfigure(self.encrypt_tab, 0, weight=1)
        Grid.columnconfigure(self.encrypt_tab, 1, weight=1)
        Grid.rowconfigure(self.encrypt_tab, 0, weight=0)
        Grid.rowconfigure(self.encrypt_tab, 1, weight=0)
        # Only the listbox row (2) grows when the window is resized
        Grid.rowconfigure(self.encrypt_tab, 2, weight=1)
        Grid.rowconfigure(self.encrypt_tab, 3, weight=0)
        Grid.rowconfigure(self.encrypt_tab, 4, weight=0)
        label_select_files = Label(self.encrypt_tab, text="Select the files you want to encrypt: ")
        label_select_files.grid(column=0, row=0, padx='5', pady='5', sticky=W)
        button_open_file_explorer = Button(self.encrypt_tab, text="Open file explorer", command=self.open_file_dialog)
        button_open_file_explorer.grid(column=1, row=0, padx='5', pady='5', sticky=E+W)
        self.encryption_type_combobox = ttk.Combobox(self.encrypt_tab, state="readonly", values=["Simple", "Extended (Needs more storage)"])
        self.encryption_type_combobox.grid(column=0, columnspan=2, row=1, padx=5, pady=5, sticky=E+W)
        self.encryption_type_combobox.current(0)
        self.listbox_encrypt_files = ContextMenuListBox(self.encrypt_tab, selectmode='multiple')
        self.listbox_encrypt_files.grid(column=0, columnspan=2, row=2, padx='5', pady='5', sticky=N+E+S+W)
        self.listbox_encrypt_files.configure(selectbackground="dimgray")
        scrollbar = Scrollbar(self.listbox_encrypt_files, orient=VERTICAL)
        scrollbar.pack(side=RIGHT, fill=Y)
        self.listbox_encrypt_files.configure(yscrollcommand=scrollbar.set)
        scrollbar.config(command=self.listbox_encrypt_files.yview)
        self.label_encrypted_files_counter_text = StringVar()
        self.label_encrypted_files_counter_text.set("")
        self.label_encrypted_files_counter = Label(self.encrypt_tab, textvariable=self.label_encrypted_files_counter_text)
        self.label_encrypted_files_counter.grid(column=0, columnspan=2, row=3, sticky=E+W)
        button_encrypt_files = Button(self.encrypt_tab, text="Encrypt", command=self.button_encryption_click)
        button_encrypt_files.grid(column=0, columnspan=2, row=4, padx='5', pady='5', sticky=E+W)

    def init_decrypt_tab(self):
        """Lay out the decrypt tab: listbox of encrypted files and decrypt button."""
        Grid.columnconfigure(self.decrypt_tab, 0, weight=1)
        Grid.rowconfigure(self.decrypt_tab, 0, weight=0)
        # Only the listbox row (1) grows when the window is resized
        Grid.rowconfigure(self.decrypt_tab, 1, weight=1)
        Grid.rowconfigure(self.decrypt_tab, 2, weight=0)
        Grid.rowconfigure(self.decrypt_tab, 3, weight=0)
        label_select_files_decrypt = Label(self.decrypt_tab, text="Select the files you want to decrypt: ")
        label_select_files_decrypt.grid(column=0, row=0, padx='5', pady='5', sticky=W)
        self.listbox_decrypt_files = Listbox(self.decrypt_tab, selectmode='multiple')
        self.listbox_decrypt_files.grid(column=0, row=1, padx='5', pady='5', sticky=N+E+S+W)
        self.listbox_decrypt_files.configure(selectbackground="dimgray")
        # Populate the listbox with the already-encrypted files
        self.get_encrypted_files()
        scrollbar = Scrollbar(self.listbox_decrypt_files, orient=VERTICAL)
        scrollbar.pack(side=RIGHT, fill=Y)
        self.listbox_decrypt_files.configure(yscrollcommand=scrollbar.set)
        scrollbar.config(command=self.listbox_decrypt_files.yview)
        self.label_decrypted_files_counter_text = StringVar()
        self.label_decrypted_files_counter_text.set("")
        self.label_decrypted_files_counter = Label(self.decrypt_tab, textvariable=self.label_decrypted_files_counter_text)
        self.label_decrypted_files_counter.grid(column=0, row=2, sticky=E+W)
        button_decrypt_files = Button(self.decrypt_tab, text="Decrypt", command=self.button_decryption_click)
        button_decrypt_files.grid(column=0, row=3, padx='5', pady='5', sticky=E+W)
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the 'django_template' table for the
    dbtemplates app plus the M2M join table linking templates to sites."""

    def forwards(self, orm):
        """Apply the migration: create both tables and the unique constraint."""
        # Adding model 'Template'
        db.create_table('django_template', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
            ('content', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('creation_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('last_changed', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        db.send_create_signal('dbtemplates', ['Template'])
        # Adding M2M table for field sites on 'Template'
        db.create_table('django_template_sites', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('template', models.ForeignKey(orm['dbtemplates.template'], null=False)),
            ('site', models.ForeignKey(orm['sites.site'], null=False))
        ))
        # Each (template, site) pair may appear only once
        db.create_unique('django_template_sites', ['template_id', 'site_id'])

    def backwards(self, orm):
        """Revert the migration: drop both tables."""
        # Deleting model 'Template'
        db.delete_table('django_template')
        # Removing M2M table for field sites on 'Template'
        db.delete_table('django_template_sites')

    # Frozen ORM definitions used by South to reconstruct model state
    models = {
        'dbtemplates.template': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Template', 'db_table': "'django_template'"},
            'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['dbtemplates']
|
"""Kaptos schema module."""
import roax.schema as s
class bearing(s.float):
    """Bearing, in degrees."""

    def validate(self, value):
        """Validate that the value is a bearing in the range [0.0, 360.0).

        :raise s.SchemaError: if the value is not a valid bearing.
        """
        # Run the base float-schema validation first (type/constraint checks)
        super().validate(value)
        if value < 0.0 or value >= 360.0:
            # Fixed: 'SchemaError' was an unqualified (undefined) name here,
            # which raised NameError instead of a validation error.
            raise s.SchemaError("invalid bearing; must be 0.0 ≤ degrees < 360.0")
class modulation(s.str):
    """Signal modulation type."""

    def __init__(self, **kwargs):
        # Forward caller options (description, nullable, ...) to the base schema
        # instead of silently dropping them as before.
        super().__init__(enum={"am", "fm", "lsb", "usb", "dmr", "dstar"}, **kwargs)
|
from aoc2021.util import get_input
from aoc2021.day02 import submarine
def solve_part1(entries):
    """Navigate the part-1 submarine through *entries* and return the
    product of its final horizontal position and depth."""
    boat = submarine.Submarine()
    boat.navigate(entries)
    final = boat.position
    return final.horizontal * final.depth
def solve_part2(entries):
    """Navigate the part-2 submarine (aim-based) through *entries* and return
    the product of its final horizontal position and depth."""
    boat = submarine.Submarine2()
    boat.navigate(entries)
    final = boat.position
    return final.horizontal * final.depth
if __name__ == "__main__":  # pragma: no cover
    # Read the puzzle input (split into lines) and print both answers.
    entries = get_input("aoc2021/day02/input", "split")
    print(solve_part1(entries))
    print(solve_part2(entries))
|
# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import unittest
from fabric.api import local
from fabric.state import env
import nose
from lib.noseplugin import OptionParser, parser_option
from lib import base
from lib.base import (
Bridge,
BGP_FSM_ESTABLISHED,
)
from lib.gobgp import GoBGPContainer
from lib.quagga import QuaggaOSPFContainer
def try_local(command, f=local, ok_ret_codes=None, **kwargs):
    """Run *command* via *f* (fabric's local by default) while temporarily
    treating the extra *ok_ret_codes* as successful exit codes.

    The global env.ok_ret_codes list is restored afterwards, even on error.
    """
    extra_codes = ok_ret_codes or []
    saved_codes = list(env.ok_ret_codes)
    try:
        env.ok_ret_codes.extend(extra_codes)
        return f(command, **kwargs)
    finally:
        # Always restore the global state, whatever f() did
        env.ok_ret_codes = saved_codes
def wait_for(f, timeout=120):
    """Poll *f* once per second until it returns a truthy value.

    :param f: zero-argument callable to probe
    :param timeout: seconds after which Exception('timeout') is raised
    """
    interval = 1
    elapsed = 0
    while not f():
        time.sleep(interval)
        elapsed += interval
        if elapsed >= timeout:
            raise Exception('timeout')
def get_ifname_with_prefix(prefix, f=local):
    """Return the name of the first interface holding an address in *prefix*.

    Runs 'ip addr show to <prefix>' via *f* and extracts the interface name
    (stripping any VLAN '@parent' suffix) from the first output line.
    """
    command = (
        "ip addr show to %s"
        " | head -n1 | cut -d'@' -f1 | cut -d' ' -f2" % prefix)
    return f(command, capture=True)
class ZebraNHTTest(unittest.TestCase):
    """
    Test case for Next-Hop Tracking with Zebra integration.

    Verifies that GoBGP (r2) tracks the OSPF metric to a nexthop and reflects
    metric changes in the BGP MED attribute propagated to r1.
    """
    # R1: GoBGP
    # R2: GoBGP + Zebra + OSPFd
    # R3: Zebra + OSPFd
    # R4: Zebra + OSPFd
    #
    #   +----+
    #   | R3 |... has loopback 10.3.1.1/32
    #   +----+
    #    /  |
    #   /   |
    #  /  +----+
    # |   | R4 |
    #  \  +----+
    #   \   |
    # +----+|
    # | R2 |+
    # +----+
    #   | 192.168.0.2/24
    #   |
    #   | 192.168.0.0/24
    #   |
    #   | 192.168.0.1/24
    # +----+
    # | R1 |
    # +----+

    @classmethod
    def setUpClass(cls):
        """Start all four router containers and wire up the bridges."""
        gobgp_ctn_image_name = parser_option.gobgp_image
        base.TEST_PREFIX = parser_option.test_prefix
        local("echo 'start %s'" % cls.__name__, capture=True)
        # r1: plain GoBGP speaker, no Zebra
        cls.r1 = GoBGPContainer(
            name='r1', asn=65000, router_id='192.168.0.1',
            ctn_image_name=gobgp_ctn_image_name,
            log_level=parser_option.gobgp_log_level,
            zebra=False)
        # r2: GoBGP + Zebra (ZAPI v3) + OSPFd — the router under test
        cls.r2 = GoBGPContainer(
            name='r2', asn=65000, router_id='192.168.0.2',
            ctn_image_name=gobgp_ctn_image_name,
            log_level=parser_option.gobgp_log_level,
            zebra=True,
            zapi_version=3,
            ospfd_config={
                'networks': {
                    '192.168.23.0/24': '0.0.0.0',
                    '192.168.24.0/24': '0.0.0.0',
                },
            })
        # r3: Quagga router advertising its loopback 10.3.1.1/32 via OSPF
        cls.r3 = QuaggaOSPFContainer(
            name='r3',
            zebra_config={
                'interfaces': {
                    'lo': [
                        'ip address 10.3.1.1/32',
                    ],
                },
            },
            ospfd_config={
                'networks': {
                    '10.3.1.1/32': '0.0.0.0',
                    '192.168.23.0/24': '0.0.0.0',
                    '192.168.34.0/24': '0.0.0.0',
                },
            })
        # r4: alternate OSPF path between r2 and r3
        cls.r4 = QuaggaOSPFContainer(
            name='r4',
            ospfd_config={
                'networks': {
                    '192.168.34.0/24': '0.0.0.0',
                    '192.168.24.0/24': '0.0.0.0',
                },
            })
        # Wait for the slowest container to come up
        wait_time = max(ctn.run() for ctn in [cls.r1, cls.r2, cls.r3, cls.r4])
        time.sleep(wait_time)
        cls.br_r1_r2 = Bridge(name='br_r1_r2', subnet='192.168.12.0/24')
        [cls.br_r1_r2.addif(ctn) for ctn in (cls.r1, cls.r2)]
        cls.br_r2_r3 = Bridge(name='br_r2_r3', subnet='192.168.23.0/24')
        [cls.br_r2_r3.addif(ctn) for ctn in (cls.r2, cls.r3)]
        cls.br_r2_r4 = Bridge(name='br_r2_r4', subnet='192.168.24.0/24')
        [cls.br_r2_r4.addif(ctn) for ctn in (cls.r2, cls.r4)]
        cls.br_r3_r4 = Bridge(name='br_r3_r4', subnet='192.168.34.0/24')
        [cls.br_r3_r4.addif(ctn) for ctn in (cls.r3, cls.r4)]

    def test_01_BGP_neighbor_established(self):
        """
        Test to start BGP connection up between r1-r2.
        """
        self.r1.add_peer(self.r2, bridge=self.br_r1_r2.name)
        self.r2.add_peer(self.r1, bridge=self.br_r1_r2.name)
        self.r1.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=self.r2)

    def test_02_OSPF_established(self):
        """
        Test to start OSPF connection up between r2-r3 and receive the route
        to r3's loopback '10.3.1.1'.
        """
        def _f():
            return try_local(
                "vtysh -c 'show ip ospf route'"
                " | grep '10.3.1.1/32'",
                f=self.r2.local,
                ok_ret_codes=[1],  # for the empty case with "grep" command
                capture=True)
        wait_for(f=_f)

    def test_03_add_ipv4_route(self):
        """
        Test to add IPv4 route to '10.3.1.0/24' whose nexthop is r3's
        loopback '10.3.1.1'.
        Also, test to receive the initial MED/Metric.
        """
        # MED/Metric = 10(r2 to r3) + 10(r3-ethX to r3-lo)
        med = 20

        def _f_r2():
            return try_local(
                "gobgp global rib -a ipv4 10.3.1.0/24"
                " | grep 'Med: %d'" % med,
                f=self.r2.local,
                ok_ret_codes=[1],  # for the empty case with "grep" command
                capture=True)

        def _f_r1():
            return try_local(
                "gobgp global rib -a ipv4 10.3.1.0/24"
                " | grep 'Med: %d'" % med,
                f=self.r1.local,
                ok_ret_codes=[1],  # for the empty case with "grep" command
                capture=True)

        self.r2.local(
            'gobgp global rib add -a ipv4 10.3.1.0/24 nexthop 10.3.1.1')
        wait_for(f=_f_r2)
        wait_for(f=_f_r1)

    def test_04_link_r2_r3_down(self):
        """
        Test to update MED to the nexthop if the Metric to that nexthop is
        changed by the link down.
        If the link r2-r3 goes down, MED/Metric should be increased.
        """
        # MED/Metric = 10(r2 to r4) + 10(r4 to r3) + 10(r3-ethX to r3-lo)
        med = 30

        def _f_r2():
            return try_local(
                "gobgp global rib -a ipv4 10.3.1.0/24"
                " | grep 'Med: %d'" % med,
                f=self.r2.local,
                ok_ret_codes=[1],  # for the empty case with "grep" command
                capture=True)

        def _f_r1():
            return try_local(
                "gobgp global rib -a ipv4 10.3.1.0/24"
                " | grep 'Med: %d'" % med,
                f=self.r1.local,
                ok_ret_codes=[1],  # for the empty case with "grep" command
                capture=True)

        # Bring down r3's interface on the r2-r3 segment to force the detour via r4
        ifname = get_ifname_with_prefix('192.168.23.3/24', f=self.r3.local)
        self.r3.local('ip link set %s down' % ifname)
        wait_for(f=_f_r2)
        wait_for(f=_f_r1)

    def test_05_link_r2_r3_restore(self):
        """
        Test to update MED to the nexthop if the Metric to that nexthop is
        changed by the link up again.
        If the link r2-r3 goes up again, MED/Metric should be update with
        the initial value.
        """
        # MED/Metric = 10(r2 to r3) + 10(r3-ethX to r3-lo)
        med = 20

        def _f_r2():
            return try_local(
                "gobgp global rib -a ipv4 10.3.1.0/24"
                " | grep 'Med: %d'" % med,
                f=self.r2.local,
                ok_ret_codes=[1],  # for the empty case with "grep" command
                capture=True)

        def _f_r1():
            return try_local(
                "gobgp global rib -a ipv4 10.3.1.0/24"
                " | grep 'Med: %d'" % med,
                f=self.r1.local,
                ok_ret_codes=[1],  # for the empty case with "grep" command
                capture=True)

        # Restore the direct r2-r3 link; the metric should drop back to 20
        ifname = get_ifname_with_prefix('192.168.23.3/24', f=self.r3.local)
        self.r3.local('ip link set %s up' % ifname)
        wait_for(f=_f_r2)
        wait_for(f=_f_r1)
if __name__ == '__main__':
output = local("which docker 2>&1 > /dev/null ; echo $?", capture=True)
if int(output) is not 0:
print "docker not found"
sys.exit(1)
nose.main(argv=sys.argv, addplugins=[OptionParser()],
defaultTest=sys.argv[0])
|
__author__ = 'Sean Yu'
'''created @2015/10/26'''
# -*- coding: UTF-8 -*-
__author__ = 'Sean Yu'
__mail__ = 'try.dash.now@gmail.com'
'''
created 2015/9/18
'''
import unittest
import os
import sys
# Put the project's 'lib' directory (a sibling of the current working
# directory's parent) on the import path so local modules can be imported.
pardir = os.path.dirname(os.path.realpath(os.getcwd()))
#pardir= os.path.sep.join(pardir.split(os.path.sep)[:-1])
sys.path.append(os.path.sep.join([pardir, 'lib']))
class test_IxiaNetwork(unittest.TestCase):
    """Placeholder test case for Ixia network interaction."""

    def test_Ixia(self):
        # Fixed: the method had no body in the source, which is a syntax error.
        # TODO(review): implement the actual Ixia test.
        pass


if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-31 21:14
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: adds the 'data' JSONField to the Post model."""

    dependencies = [
        ('api', '0006_auto_20171031_2019'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='data',
            # Default is an empty string so existing rows get a value
            field=django.contrib.postgres.fields.jsonb.JSONField(default=''),
        ),
    ]
|
from PyQt4 import QtCore, QtGui
import thread
import tensorflow as tf
from rospy import Time
from keras.models import load_model
from keras.backend import set_session
from Model.WorldModel import WorldModel
from Model.EgoVehicle import EgoVehicle
from Model.TargetVehicle import TargetVehicle
from Model.Road import Road
from ImageProcessing.Sampler import Sampler
from ImageProcessing.Extrapolation import Extrapolation
from ImageProcessing.Visualiser import Visualiser
from LaneDetection.LaneDetector import LaneDetector
import DataProcessing.Listeners.src.live_reader.scripts.listener as listener
import os, sys
class Controller(object):
"""Connects all other classes and controls the flow of the program.
Regulates the input/output and connects the listeners to the visualiser
and gui. The worldmodel is also initialised here to keep track of the
internal representation. The listeners pass the input which is stored
in the worldmodel, which is then used to make a visualisation and pass
to the gui for drawing.
Attributes:
core_terminal: QWidget that holds the roscore xterm
play_terminal: QWidget that holds the rosbag play xterm
lanes: neural network that detects the lanes from a given image
visualiser: generates scenes to be visualised.
world_model: stores in internal representation.
sess: the tensorflow session, necessary for multitreaded functioning
graph: the default tensorflow graph, also necessary
model: the neural network model
extrap: class that extrapolates the generated dots
file: the path to the bag file we're working with
raw_queue: multithreading Queue used to pass raw images to the GUI
lane_queue: multithreading Queue used to pass lane images to the GUI
visual_queue: multithreading Queue used to pass visualisation images
to the GUI
raw_image_ready: python Event used to signal when a raw image is ready
lane_image_ready: python Event used to signal when a lane image
is ready
visual_image_ready: python Event used to signal when a visualisation
image is ready
time_dots: the list of (time,dots) tuples that will be written to
the bag
"""
def __init__(self, core_terminal, play_terminal, file, raw_image_ready,
lane_image_ready, visual_image_ready, raw_queue, lane_queue,
visual_queue, time_dots, play_pause):
self.ld = LaneDetector()
self.world_model = WorldModel(EgoVehicle(0, 0), None)
self.sess = tf.Session()
self.graph = tf.get_default_graph()
set_session(self.sess)
if getattr(sys, 'frozen', False):
model_path = os.path.join(os.path.dirname(sys.executable), 'LaneDetection/full_CNN_model.h5')
else:
model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../LaneDetection/full_CNN_model.h5')
self.model = load_model(model_path)
self.extrap = Extrapolation()
self.visualiser = Visualiser(640, 960, self.world_model)
self.sampler = Sampler()
self.file = file
self.raw_queue = raw_queue
self.lane_queue = lane_queue
self.visual_queue = visual_queue
self.raw_image_ready = raw_image_ready
self.lane_image_ready = lane_image_ready
self.visual_image_ready = visual_image_ready
self.time_dots = time_dots
self.search_process = QtCore.QProcess()
self.core_terminal = core_terminal
self.play_terminal = play_terminal
self.core_process = QtCore.QProcess()
self.play_process = QtCore.QProcess()
self.play_pause_id = play_pause
if len(self.file) > 0:
self.start_playterminal()
self.start_coreterminal()
thread.start_new_thread(listener.listen, (self,))
"""
The following update functions are used for both online and offline
reading through the use of listeners. When the listeners receive new data
they call the appropriate Controller function that then processes and
updates the visualisation/gui.
"""
def update_targets(self, targets):
    """Handle a new target message from the listener.

    Converts every reported target into a TargetVehicle and pushes the
    whole batch into the world model.
    """
    detected = [
        TargetVehicle(t.kinodynamics.pose.x, t.kinodynamics.pose.y, t.id)
        for t in targets.targets
    ]
    self.world_model.update_targets(detected)
def update_image(self, frame, time):
    """Handle a new camera frame from the listener.

    Pushes the raw frame to the GUI, runs lane detection, records the
    sampled lane dots for later bag writing, updates the world model's
    road and pushes both the lane overlay and the rendered scene to
    their GUI queues.

    :param frame: the raw camera image
    :param time: timestamp associated with the frame
    """
    # Hand the untouched frame to the GUI first so it never waits on
    # the (slow) detection step.
    self.raw_queue.put(frame)
    self.raw_image_ready.set()
    # Detect lanes using the shared TF session/graph captured in __init__.
    raw_plus_lanes, _, top_down_lanes = self.ld.get_road_lines(frame,
                                                               self.model,
                                                               self.graph,
                                                               self.sess)
    self.lane_queue.put(raw_plus_lanes)
    self.lane_image_ready.set()
    # Sample discrete dots from the top-down lane image and remember them
    # with their timestamp so they can be written to the bag later.
    dots = self.sampler.find_dots(top_down_lanes)
    self.time_dots.append((time, dots))
    self.world_model.update_road(Road(self.extrap.extrapolate_dots(dots)))
    # Render and publish the visualisation scene.
    scene = self.visualiser.make_scene()
    self.visual_queue.put(scene)
    self.visual_image_ready.set()
def kill_terminals(self):
    """Terminate the rosbag-play and roscore terminal processes."""
    # Same order as before: play first, then core.
    for process in (self.play_process, self.core_process):
        process.terminate()
def start_playterminal(self):
    """Starts the rosbag play terminal process.

    Launches an xterm embedded into the play_terminal widget that sources
    the ROS environment and replays the selected bag file (initially
    paused).
    """
    # The two adjacent string literals concatenate: the xterm executes
    # "source .../setup.bash; rosbag play --pause <file>".
    self.play_process.start('xterm', ['-into',
                                      str(self.play_terminal.winId()),
                                      '-hold', '-e',
                                      'source /opt/ros/melodic/setup.bash'
                                      '; rosbag play --pause ' +
                                      self.file[0]])
    # Remember the process id so the GUI can toggle play/pause later.
    self.play_pause_id.append(self.play_process.pid())
def start_coreterminal(self):
    """Starts the roscore terminal process.

    Launches an xterm embedded into the core_terminal widget that sources
    the ROS environment and runs roscore.
    """
    # Adjacent string literals concatenate into one shell command string.
    self.core_process.start('xterm', ['-into',
                                      str(self.core_terminal.winId()),
                                      '-hold', '-e',
                                      'source /opt/ros/melodic/setup.bash'
                                      '; roscore'])
# Wok static-site-generator configuration: register the HeadingAnchors
# hook to run after each page's template has been rendered.
from wok.contrib.hooks import HeadingAnchors

hooks = {
    'page.template.post': [ HeadingAnchors() ],
}
|
# Build with: python setup.py build --compiler=mingw32
# Moved the shutil import to the top of the script (PEP 8: imports first).
import shutil

from distutils.core import setup, Extension

# Compile the _omniscience C extension with C99 + aggressive optimisation.
setup(name='_omniscience',
      ext_modules=[Extension('_omniscience',
                             sources=['_omniscience.c'],
                             extra_compile_args=['-std=gnu99', '-Ofast'])])

# Copy the freshly built extension next to this script so it can be
# imported directly.
# NOTE(review): the path is hard-coded for a win32 / Python 2.7 build
# layout — confirm before reusing on another platform.
shutil.copy("./build/lib.win32-2.7/_omniscience.pyd", ".")
import os
from setuptools import setup
import xenon

# Resolve the data files relative to this setup script's location.
_here = os.path.dirname(__file__)

with open(os.path.join(_here, 'README.rst')) as fobj:
    readme = fobj.read()

with open(os.path.join(_here, 'requirements.txt')) as fobj:
    reqs = fobj.read().splitlines()

# Package metadata; the version is taken from the package itself so it is
# defined in exactly one place.
setup(
    name='xenon',
    version=xenon.__version__,
    author='Michele Lacchia',
    author_email='michelelacchia@gmail.com',
    url='https://xenon.readthedocs.org/',
    download_url='https://pypi.python.org/xenon/',
    license='MIT',
    description='Monitor code metrics for Python on your CI server',
    platforms='any',
    long_description=readme,
    packages=['xenon'],
    tests_require=['tox', 'httpretty'],
    install_requires=reqs,
    entry_points={'console_scripts': ['xenon = xenon:main']},
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Quality Assurance',
        'Topic :: Utilities',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
""" Solves a variation of optimization problem of targeted Pagerank.
It solves the targeted optimization problem as described
in "Fairness-Aware Link Analysis"[1] paper but for the topk of the
pagerank algorithm. It also preserve the order of these nodes compare
to simple targeted. Makes use of cvx optimization package[2].
Parameters:
phi (float): Wanted ratio for the protected category
(category 1 - R). If phi == 0 => phi = ratio of protected
category (i.e. |R|/|N|). N:= Set of alla nodes.
R:= set of red nodes.
k (int): Number of nodes to take into consideration.
Creates:
"out_sensitive_jump_v.txt" (txt file): Μinimizer vector.
"out_sensitive_pagerank.txt" (txt file): Sensitive Pagerank
corresponds to minimizer vector.
References:
[1]
[2] https://cvxopt.org/
TODO:
Add reference to paper.
"""
import numpy as np
import cvxopt as cvx
from cvxopt import spmatrix, matrix, solvers
from scipy.sparse import coo_matrix
import sys
def uniformPR(M, gamma=0.15):
    """Return the uniform-jump Pagerank vector and the propagation matrix Q.

    Q satisfies p(v) = Qv where v is the uniform jump vector
    (i.e. v(i) = 1/N, N := number of nodes).

    Parameters:
        M (nd.array): Adjacency matrix (float entries).
        gamma (float): Probability to jump; jump vector's coefficient.

    Returns:
        p (1D np.array): Pagerank vector.
        Q (2D nd.array): Matrix such that p(v) = Qv.
    """
    n = M.shape[0]
    # Row-normalise the adjacency matrix into a transition matrix P.
    inv_out_degree = np.array(np.reciprocal(M.sum(axis=1).T)).flatten()
    P = np.diag(np.array(inv_out_degree)).dot(M)
    # Q = gamma * (I - (1 - gamma) P)^-1, transposed so that p = Qv.
    Q = (gamma * np.linalg.inv(np.eye(n) - (1 - gamma) * P)).T
    uniform_jump = (np.ones(n) / (1.0 * n)).T
    p = Q.dot(uniform_jump)
    # Side effect kept from the original: dump the pagerank vector to disk.
    with open("opt_pagerank.txt", "w") as file_one:
        for value in p:
            file_one.write("%f\n" % value)
    return (p, Q)
def top_k(p, Q, k):
    """Build the inequality-constraint matrix for the top-k order.

    Matrix K encodes the constraint keeping the current top-k nodes in
    the top k positions: each row demands that a non-top node's pagerank
    stays below a top node's pagerank.

    Returns:
        K (2D nd.array): Constraint matrix with k*(n-k) rows.
    """
    n = len(p)
    order = np.argsort(-p)
    leaders, followers = order[:k], order[k:]
    K = np.zeros((k * (n - k), n))
    row = 0
    for i in leaders:
        for j in followers:
            # rank(j) <= rank(i)  <=>  (Q[j] - Q[i]) x <= 0
            K[row, :] = Q[int(j), :] - Q[int(i), :]
            row += 1
    return K
def new_index(p, index, top=None):
    """Return red-membership and top-k membership masks.

    Parameters:
        p (1D np.array): Pagerank vector.
        index (python list): index[i] == True if node i is red.
        top (int, optional): Number of top nodes to consider.  Defaults
            to the module-level ``k`` — the original implementation read
            that global implicitly, so the fallback keeps existing call
            sites working.

    Returns:
        index_new (python list): index_new[i] == True if node i is red
            and is in the top-k nodes of pagerank.
        top_k (python list): top_k[i] == True if node i is in the top-k
            nodes of pagerank.
    """
    if top is None:
        # Original behavior: rely on the script-level global ``k``.
        top = k
    n = len(p)
    sorted_index = np.argsort(-p)
    # Mark the nodes ranked in the first ``top`` positions.
    # (Local renamed from ``top_k`` — it shadowed the sibling function.)
    in_top = [False] * n
    for i in sorted_index[:top]:
        in_top[i] = True
    index_new = [in_top[i] and index[i] for i in range(n)]
    return index_new, in_top
def fairPR(M, index, phi, k):
    """Return the minimizer jump vector and the corresponding pagerank.

    Solves the quadratic program min ||Qx - p||^2 subject to:
      * x >= 0 element-wise,
      * the current top-k nodes stay in the top k positions (Kx <= 0),
      * the protected category holds a phi share of the top-k mass,
      * x sums to 1 (x is a jump/probability vector).

    Parameters:
        M (nd.array): Adjacency Matrix.
        index (python list): index[i] == True if node i belongs
            to protected category (i.e. category 1).
        phi (float): Wanted ratio for protected category.
        k (int): Number of nodes for the targeted algorithm.

    Returns:
        x (CVX 1D matrix): Minimizer jump vector.
        Q*x (CVX 1D matrix): Optimal pagerank vector.
    """
    p, Q = uniformPR(M)
    K = top_k(p, Q, k)
    # NOTE(review): new_index() falls back to the module-level ``k``;
    # it is assumed to equal this function's ``k`` argument at call time.
    index_top_k_1, index_top_k = new_index(p, index)
    n = p.size
    # Inequality constraints: x >= 0 and Kx <= 0.
    G = matrix([matrix(-1 * np.eye(n)), matrix(K)])
    h = matrix([matrix(np.zeros(n)), matrix(np.zeros(k * (n - k)))])
    # Equality constraints: fairness ratio over the top-k and sum(x) == 1.
    A = matrix([matrix((Q[index_top_k_1, :].sum(0) - (phi * Q[index_top_k, :]).sum(0))),
                matrix(np.ones(n))], (n, 2)).T
    b = matrix([0., 1.])
    Q = matrix(Q)
    p = matrix(p)
    x = solvers.qp(P=Q.T * Q, q=-Q.T * p, G=G, h=h, A=A, b=b)['x']
    # The original recomputed uniformPR(M) here and discarded the result
    # (only re-writing opt_pagerank.txt with identical content); that
    # redundant call was removed.
    return (x, Q * x)
def create_adj_matrix(filename = "out_graph.txt"):
""" Creates Adjacency matrix and index list.
Parameters:
edge_file (txt file): edge list file in proper format.
com_file (txt file): community file in proper format.
Returns:
M (nd.array): Adjacency Matrix.
index (python list): index[i] == True if node i belongs to protected
category (i.e. category 1).
Notes:
See specifications for the files in general description
of the project.
TODO: Add link for the general specifications.
"""
n = 0
with open(filename, "r") as file_one:
n = int(file_one.readline())
M = np.zeros((n,n))
with open(filename, "r") as file_one:
file_one.readline()
for line in file_one:
edge = line.split()
M[int(edge[0])][int(edge[1])] = 1.
index = [False for i in range(n)]
with open("out_community.txt", "r") as file_one:
file_one.readline()
for line in file_one:
info = line.split()
if int(info[1]) == 1:
index[int(info[0])] = True
j = 0
for i in range(n):
if not M[i].any():
j += 1
M[i] = [1. for k in M[i]]
print("%d vectors without out neighbors" %j)
return M, index
# Read command line arguments.
# Usage: <script> <ratio for protected category> <top-k>
if len(sys.argv) != 3:
    print("provide 2 arguments <ratio for protected category>, <top-k>")
else:
    pr = float(sys.argv[1])
    k = int(sys.argv[2])
    # Get Adjacency Matrix and index vector
    # (index[i] == true if node i belongs to protected category).
    M, index = create_adj_matrix()
    # phi == 0 means "use the graph's own protected ratio |R|/|N|".
    if pr == 0:
        pr = sum(index) / len(index)
    print("phi: ", pr)
    # Get fair pagerank and corresponding jump vector.
    j, fp = fairPR(M, index, pr, k)
    # Change from data type cvx matrix to numpy array.
    j = np.array(j).flatten()
    fp = np.array(fp).flatten()
    # Store results in text files.
    # NOTE(review): the output names differ from the module docstring
    # ("out_sensitive_*"), and "out_stargeted_topk.txt" looks like a typo —
    # kept as-is since downstream tooling may expect these exact names.
    with open("out_targeted_topk_jump_v.txt", "w") as file_one:
        for i in j:
            file_one.write(str(i) + "\n")
    with open("out_stargeted_topk.txt", "w") as file_one:
        for i in fp:
            file_one.write(str(i) + "\n")
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: attestation.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='attestation.proto',
package='ethereum.eth.v1alpha1',
syntax='proto3',
serialized_options=_b('\n\031org.ethereum.eth.v1alpha1B\020AttestationProtoP\001Z6github.com/prysmaticlabs/ethereumapis/eth/v1alpha1;eth\252\002\025Ethereum.Eth.v1alpha1\312\002\025Ethereum\\Eth\\v1alpha1'),
serialized_pb=_b('\n\x11\x61ttestation.proto\x12\x15\x65thereum.eth.v1alpha1\"p\n\x0b\x41ttestation\x12\x18\n\x10\x61ggregation_bits\x18\x01 \x01(\x0c\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.ethereum.eth.v1alpha1.AttestationData\x12\x11\n\tsignature\x18\x03 \x01(\x0c\"\x88\x01\n\x1c\x41ggregateAttestationAndProof\x12\x18\n\x10\x61ggregator_index\x18\x01 \x01(\x04\x12\x35\n\taggregate\x18\x03 \x01(\x0b\x32\".ethereum.eth.v1alpha1.Attestation\x12\x17\n\x0fselection_proof\x18\x02 \x01(\x0c\"}\n\"SignedAggregateAttestationAndProof\x12\x44\n\x07message\x18\x01 \x01(\x0b\x32\x33.ethereum.eth.v1alpha1.AggregateAttestationAndProof\x12\x11\n\tsignature\x18\x02 \x01(\x0c\"\xb9\x01\n\x0f\x41ttestationData\x12\x0c\n\x04slot\x18\x01 \x01(\x04\x12\x17\n\x0f\x63ommittee_index\x18\x02 \x01(\x04\x12\x19\n\x11\x62\x65\x61\x63on_block_root\x18\x03 \x01(\x0c\x12\x31\n\x06source\x18\x04 \x01(\x0b\x32!.ethereum.eth.v1alpha1.Checkpoint\x12\x31\n\x06target\x18\x05 \x01(\x0b\x32!.ethereum.eth.v1alpha1.Checkpoint\"j\n\tCrosslink\x12\r\n\x05shard\x18\x01 \x01(\x04\x12\x13\n\x0bparent_root\x18\x02 \x01(\x0c\x12\x13\n\x0bstart_epoch\x18\x03 \x01(\x04\x12\x11\n\tend_epoch\x18\x04 \x01(\x04\x12\x11\n\tdata_root\x18\x05 \x01(\x0c\")\n\nCheckpoint\x12\r\n\x05\x65poch\x18\x01 \x01(\x04\x12\x0c\n\x04root\x18\x02 \x01(\x0c\x42\x97\x01\n\x19org.ethereum.eth.v1alpha1B\x10\x41ttestationProtoP\x01Z6github.com/prysmaticlabs/ethereumapis/eth/v1alpha1;eth\xaa\x02\x15\x45thereum.Eth.v1alpha1\xca\x02\x15\x45thereum\\Eth\\v1alpha1b\x06proto3')
)
_ATTESTATION = _descriptor.Descriptor(
name='Attestation',
full_name='ethereum.eth.v1alpha1.Attestation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='aggregation_bits', full_name='ethereum.eth.v1alpha1.Attestation.aggregation_bits', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='ethereum.eth.v1alpha1.Attestation.data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature', full_name='ethereum.eth.v1alpha1.Attestation.signature', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=44,
serialized_end=156,
)
_AGGREGATEATTESTATIONANDPROOF = _descriptor.Descriptor(
name='AggregateAttestationAndProof',
full_name='ethereum.eth.v1alpha1.AggregateAttestationAndProof',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='aggregator_index', full_name='ethereum.eth.v1alpha1.AggregateAttestationAndProof.aggregator_index', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='aggregate', full_name='ethereum.eth.v1alpha1.AggregateAttestationAndProof.aggregate', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='selection_proof', full_name='ethereum.eth.v1alpha1.AggregateAttestationAndProof.selection_proof', index=2,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=159,
serialized_end=295,
)
_SIGNEDAGGREGATEATTESTATIONANDPROOF = _descriptor.Descriptor(
name='SignedAggregateAttestationAndProof',
full_name='ethereum.eth.v1alpha1.SignedAggregateAttestationAndProof',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='ethereum.eth.v1alpha1.SignedAggregateAttestationAndProof.message', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature', full_name='ethereum.eth.v1alpha1.SignedAggregateAttestationAndProof.signature', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=297,
serialized_end=422,
)
_ATTESTATIONDATA = _descriptor.Descriptor(
name='AttestationData',
full_name='ethereum.eth.v1alpha1.AttestationData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='slot', full_name='ethereum.eth.v1alpha1.AttestationData.slot', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='committee_index', full_name='ethereum.eth.v1alpha1.AttestationData.committee_index', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beacon_block_root', full_name='ethereum.eth.v1alpha1.AttestationData.beacon_block_root', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source', full_name='ethereum.eth.v1alpha1.AttestationData.source', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target', full_name='ethereum.eth.v1alpha1.AttestationData.target', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=425,
serialized_end=610,
)
_CROSSLINK = _descriptor.Descriptor(
name='Crosslink',
full_name='ethereum.eth.v1alpha1.Crosslink',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shard', full_name='ethereum.eth.v1alpha1.Crosslink.shard', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parent_root', full_name='ethereum.eth.v1alpha1.Crosslink.parent_root', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start_epoch', full_name='ethereum.eth.v1alpha1.Crosslink.start_epoch', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end_epoch', full_name='ethereum.eth.v1alpha1.Crosslink.end_epoch', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_root', full_name='ethereum.eth.v1alpha1.Crosslink.data_root', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=612,
serialized_end=718,
)
_CHECKPOINT = _descriptor.Descriptor(
name='Checkpoint',
full_name='ethereum.eth.v1alpha1.Checkpoint',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='epoch', full_name='ethereum.eth.v1alpha1.Checkpoint.epoch', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='root', full_name='ethereum.eth.v1alpha1.Checkpoint.root', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=720,
serialized_end=761,
)
_ATTESTATION.fields_by_name['data'].message_type = _ATTESTATIONDATA
_AGGREGATEATTESTATIONANDPROOF.fields_by_name['aggregate'].message_type = _ATTESTATION
_SIGNEDAGGREGATEATTESTATIONANDPROOF.fields_by_name['message'].message_type = _AGGREGATEATTESTATIONANDPROOF
_ATTESTATIONDATA.fields_by_name['source'].message_type = _CHECKPOINT
_ATTESTATIONDATA.fields_by_name['target'].message_type = _CHECKPOINT
DESCRIPTOR.message_types_by_name['Attestation'] = _ATTESTATION
DESCRIPTOR.message_types_by_name['AggregateAttestationAndProof'] = _AGGREGATEATTESTATIONANDPROOF
DESCRIPTOR.message_types_by_name['SignedAggregateAttestationAndProof'] = _SIGNEDAGGREGATEATTESTATIONANDPROOF
DESCRIPTOR.message_types_by_name['AttestationData'] = _ATTESTATIONDATA
DESCRIPTOR.message_types_by_name['Crosslink'] = _CROSSLINK
DESCRIPTOR.message_types_by_name['Checkpoint'] = _CHECKPOINT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Attestation = _reflection.GeneratedProtocolMessageType('Attestation', (_message.Message,), {
'DESCRIPTOR' : _ATTESTATION,
'__module__' : 'attestation_pb2'
# @@protoc_insertion_point(class_scope:ethereum.eth.v1alpha1.Attestation)
})
_sym_db.RegisterMessage(Attestation)
AggregateAttestationAndProof = _reflection.GeneratedProtocolMessageType('AggregateAttestationAndProof', (_message.Message,), {
'DESCRIPTOR' : _AGGREGATEATTESTATIONANDPROOF,
'__module__' : 'attestation_pb2'
# @@protoc_insertion_point(class_scope:ethereum.eth.v1alpha1.AggregateAttestationAndProof)
})
_sym_db.RegisterMessage(AggregateAttestationAndProof)
SignedAggregateAttestationAndProof = _reflection.GeneratedProtocolMessageType('SignedAggregateAttestationAndProof', (_message.Message,), {
'DESCRIPTOR' : _SIGNEDAGGREGATEATTESTATIONANDPROOF,
'__module__' : 'attestation_pb2'
# @@protoc_insertion_point(class_scope:ethereum.eth.v1alpha1.SignedAggregateAttestationAndProof)
})
_sym_db.RegisterMessage(SignedAggregateAttestationAndProof)
AttestationData = _reflection.GeneratedProtocolMessageType('AttestationData', (_message.Message,), {
'DESCRIPTOR' : _ATTESTATIONDATA,
'__module__' : 'attestation_pb2'
# @@protoc_insertion_point(class_scope:ethereum.eth.v1alpha1.AttestationData)
})
_sym_db.RegisterMessage(AttestationData)
Crosslink = _reflection.GeneratedProtocolMessageType('Crosslink', (_message.Message,), {
'DESCRIPTOR' : _CROSSLINK,
'__module__' : 'attestation_pb2'
# @@protoc_insertion_point(class_scope:ethereum.eth.v1alpha1.Crosslink)
})
_sym_db.RegisterMessage(Crosslink)
Checkpoint = _reflection.GeneratedProtocolMessageType('Checkpoint', (_message.Message,), {
'DESCRIPTOR' : _CHECKPOINT,
'__module__' : 'attestation_pb2'
# @@protoc_insertion_point(class_scope:ethereum.eth.v1alpha1.Checkpoint)
})
_sym_db.RegisterMessage(Checkpoint)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
from flask_jwt_extended import jwt_required
from {{cookiecutter.app_name}}.commons.resource import BaseResource
from {{cookiecutter.app_name}}.models import User
from {{cookiecutter.app_name}}.extensions import ma, db
from {{cookiecutter.app_name}}.commons.pagination import paginate
# Note: when developing v2, v3, etc., we would use the same model and just extend the schema and resource for the
# output we desire. If we do want to change the model, then all available versions must be changed to accommodate
# the new model so that we don't break clients.
class UserSchema(ma.ModelSchema):
    """Marshmallow schema for the User model.

    ``password`` is load-only and required, so it is accepted on input
    but never serialized back to clients.
    """
    password = ma.String(load_only=True, required=True)

    class Meta:
        model = User
        sqla_session = db.session
class UserResource(BaseResource):
    """Single object resource

    JWT-protected read/update/delete operations on one user, addressed
    by ``user_id``.  404s when the id does not exist.
    """
    method_decorators = [jwt_required]
    schema = UserSchema

    def get(self, user_id):
        user = User.query.get_or_404(user_id)
        return self.serialize(user)

    def put(self, user_id):
        # Partial update: only the supplied fields are deserialized.
        data = self.deserialize(partial=True)
        user = User.query.get_or_404(user_id)
        # NOTE(review): no explicit save/commit is visible here —
        # presumably BaseResource.serialize(..., instance=...) persists
        # the changes via the schema's sqla_session; confirm.
        return self.serialize(data, instance=user)

    def delete(self, user_id):
        user = User.query.get_or_404(user_id)
        user.delete()
        return {'results': "success"}
class UserList(BaseResource):
    """Creation and get_all

    JWT-protected paginated listing and creation of users.
    """
    method_decorators = [jwt_required]
    schema = UserSchema

    def get(self):
        query = User.query
        return paginate(query, self.schema)

    def post(self):
        # deserialize() builds a User instance from the request body
        # (UserSchema is a ModelSchema).
        user = self.deserialize()
        user.save()
        # 201 Created with the serialized new user.
        return self.serialize(user), 201
|
from django.core.checks.messages import Error
from django.shortcuts import render
from groups.db.group_forum_data_fetch import GroupDataFetch
from groups.models import Group, GroupUser
from groups.models import GroupComment
from groups.db.groups_data_fetch import Groups
def groups(request):
    """Render the groups overview page.

    For authenticated users the page splits groups into joined and
    available sets; if that lookup fails (e.g. for anonymous users) it
    falls back to showing every group as available.
    """
    context = {
        "title": "Groups",
    }
    try:
        user = request.user
        groups = Groups(user)
        context["joined_groups"] = groups.joined_groups
        context["available_groups"] = groups.available_groups
    except Exception as e:
        # Deliberate best-effort fallback for users without group data.
        # NOTE(review): the error is only printed — consider using the
        # logging framework instead.
        print(e)
        user = None
        groups = Group.objects.all()
        context["joined_groups"] = None
        context["available_groups"] = groups
    return render(request, 'groups/groups.html', context)
def group_forum(request, group_id=None):
    """Render a single group's forum page and handle its POST actions.

    POST form fields:
      * ``comment``: adds a GroupComment to the group.
      * ``join_group``: "join" adds the current user to the group,
        "leave" removes them.

    Both GET and POST end by rendering the forum with the group's data
    and whether the current user is a member.
    """
    context = {
        "title": "GROUPS_FORUM",
        "group_id": group_id,
        "name": "",
        "img_path": "",
        "users": "",
        "comments": ""
    }
    if request.method == "POST":
        data = request.POST
        group_id = data.get("group_id")
        group = Group.objects.filter(id=group_id).first()
        # Save a new comment, if one was submitted.
        comment = data.get("comment", False)
        if comment:
            group_comment = GroupComment(group=group, comment=comment, user=request.user)
            group_comment.save()
        # Handle membership changes.
        join_group = data.get('join_group', False)
        if join_group:
            user = request.user
            if join_group == "join":
                group_user = GroupUser(group=group, user=user)
                group_user.save()
            elif join_group == "leave":
                group_user = GroupUser.objects.filter(group=group, user=user).first()
                # Fix: filter().first() returns None when the membership
                # row no longer exists (e.g. a stale/double "leave"), and
                # the original crashed calling .delete() on None.
                if group_user is not None:
                    group_user.delete()
    # group-info
    group = GroupDataFetch(group_id=group_id)
    if group:
        # NOTE(review): a GroupDataFetch instance is always truthy unless
        # the class defines __bool__/__len__ — confirm this check does
        # what was intended.
        group_data = group.group_data
        context["name"] = group_data["name"]
        context["img_path"] = group_data["img_path"]
        context["users"] = group_data["users"]
        context["comments"] = group_data["comments"]
    # if the user joined the group
    try:
        user = request.user
        group = Group.objects.filter(id=group_id).first()
        context["is_joined"] = GroupUser.objects.filter(group=group, user=user).first()
    except Exception as e:
        # Anonymous users (no DB-backed user object) end up here.
        print(e)
        context["is_joined"] = False
    return render(request, 'groups/group_forum.html', context)
|
def wait_for_confirmation(client, transaction_id, timeout):
    """Wait until the network confirms the given transaction id.

    Polls the pending-transaction info for up to ``timeout`` rounds,
    blocking on each round via ``status_after_block``.

    Returns the pending-transaction info dict once confirmed, or None if
    the node stops reporting the transaction.  Raises Exception on pool
    errors or when the timeout elapses.
    """
    first_round = client.status()["last-round"] + 1
    for round_number in range(first_round, first_round + timeout):
        try:
            txn_info = client.pending_transaction_info(transaction_id)
        except Exception:
            # The node no longer knows this txid; give up silently,
            # matching the original helper's behavior.
            return
        if txn_info.get("confirmed-round", 0) > 0:
            return txn_info
        if txn_info["pool-error"]:
            raise Exception(
                'pool error: {}'.format(txn_info["pool-error"]))
        # Block until the node has processed this round.
        client.status_after_block(round_number)
    raise Exception(
        'pending tx not found in timeout rounds, timeout value = : {}'.format(timeout))
|
from db_analysis import *
|
# This file is part of the Blockchain Data Trading Simulator
# https://gitlab.com/MatthiasLohr/bdtsim
#
# Copyright 2021 Matthias Lohr <mail@mlohr.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import ceil
from typing import Any, Callable, Generator, List, Optional, Tuple
from bdtsim.account import Account
from bdtsim.simulation_result import SimulationResult, TransactionLogCollection
from bdtsim.util.strings import str_block_table
from .renderer import Renderer, ValueType
from .renderer_manager import RendererManager
class PayoffMatrixAccountCell(object):
    """Aggregated (min, max) result intervals for one account in a payoff
    matrix cell.

    Each interval argument is a ``(min, max)`` tuple or ``None`` when no
    data is available.  ``autoscale_func`` optionally rescales raw values
    of a given :class:`ValueType` for display.
    """

    def __init__(self, account: Account, tx_fees: Optional[Tuple[int, int]] = None,
                 tx_count: Optional[Tuple[int, int]] = None, funds_diff: Optional[Tuple[int, int]] = None,
                 balance_diff: Optional[Tuple[int, int]] = None, item_share: Optional[Tuple[float, float]] = None,
                 autoscale_func: Optional[Callable[[Any, ValueType], Any]] = None) -> None:
        self._account = account
        self._tx_fees = tx_fees
        self._tx_count = tx_count
        self._funds_diff = funds_diff
        self._balance_diff = balance_diff
        self._item_share = item_share
        self._autoscale_func = autoscale_func

    @property
    def account(self) -> Account:
        return self._account

    @property
    def tx_fees(self) -> Optional[Tuple[int, int]]:
        return self._tx_fees

    @property
    def tx_count(self) -> Optional[Tuple[int, int]]:
        """Accessor added for consistency with the other intervals."""
        return self._tx_count

    @property
    def funds_diff(self) -> Optional[Tuple[int, int]]:
        return self._funds_diff

    @property
    def balance_diff(self) -> Optional[Tuple[int, int]]:
        return self._balance_diff

    @property
    def item_share(self) -> Optional[Tuple[float, float]]:
        """Accessor added for consistency with the other intervals."""
        return self._item_share

    def _autoscale(self, value: Any, value_type: ValueType) -> Any:
        # Pass values through unchanged when no autoscale function is set.
        if self._autoscale_func is None:
            return value
        else:
            return self._autoscale_func(value, value_type)

    def __str__(self) -> str:
        """Render the account's intervals as a multi-line text block."""
        results = ['- %s -' % self._account.name]
        for label, interval, value_type in (
                ('TX Fees', self._tx_fees, ValueType.GAS),
                ('TX Count', self._tx_count, ValueType.PLAIN),
                ('Funds Diff', self._funds_diff, ValueType.WEI),
                ('Bal. Diff', self._balance_diff, ValueType.WEI),
                ('Item Share', self._item_share, ValueType.PLAIN)
        ):
            if interval is None:
                results.append('%s: 0' % label)
            else:
                # Collapse degenerate intervals to a single value.
                # NOTE(review): '%d' truncates floats, so item_share
                # values are shown as integers — confirm this is intended.
                if interval[0] == interval[1]:
                    results.append('%s: %d' % (label, self._autoscale(interval[0], value_type)))
                else:
                    results.append('%s: [%d, %d]' % (
                        label,
                        self._autoscale(interval[0], value_type),
                        self._autoscale(interval[1], value_type)
                    ))
        return '\n'.join(results)

    @staticmethod
    def from_aggregation_summary_list(aggregation_summary_list: List[TransactionLogCollection.Aggregation],
                                      account: Account,
                                      autoscale_func: Optional[Callable[[Any, ValueType], Any]] = None
                                      ) -> 'PayoffMatrixAccountCell':
        """Build a cell by folding min/max over every aggregation summary
        that has an entry for ``account``."""
        def _safe_attr_generator(attr_name: str) -> Generator[Any, None, None]:
            # Yield the attribute only from summaries that know this account.
            for entry in aggregation_summary_list:
                item = entry.get(account)
                if item is not None:
                    yield getattr(item, attr_name)

        if len(aggregation_summary_list):
            return PayoffMatrixAccountCell(
                account=account,
                tx_fees=(
                    min(_safe_attr_generator('tx_fees_min')),
                    max(_safe_attr_generator('tx_fees_max'))
                ),
                tx_count=(
                    min(_safe_attr_generator('tx_count_min')),
                    max(_safe_attr_generator('tx_count_max'))
                ),
                funds_diff=(
                    min(_safe_attr_generator('funds_diff_min')),
                    max(_safe_attr_generator('funds_diff_max'))
                ),
                balance_diff=(
                    min(_safe_attr_generator('balance_diff_min')),
                    max(_safe_attr_generator('balance_diff_max'))
                ),
                item_share=(
                    min(_safe_attr_generator('item_share_min')),
                    max(_safe_attr_generator('item_share_max'))
                ),
                autoscale_func=autoscale_func
            )
        else:
            # No data at all: every interval stays None and renders as 0.
            return PayoffMatrixAccountCell(
                account=account,
                autoscale_func=autoscale_func
            )
class PayoffMatrixCell(object):
    """A payoff-matrix cell pairing the seller's and buyer's account cells."""

    def __init__(self, seller_cell: PayoffMatrixAccountCell, buyer_cell: PayoffMatrixAccountCell) -> None:
        self._seller_cell = seller_cell
        self._buyer_cell = buyer_cell

    @property
    def seller_cell(self) -> PayoffMatrixAccountCell:
        return self._seller_cell

    @property
    def buyer_cell(self) -> PayoffMatrixAccountCell:
        return self._buyer_cell

    @staticmethod
    def from_aggregation_summary_list(aggregation_summary_list: List[TransactionLogCollection.Aggregation],
                                      seller: Account, buyer: Account,
                                      autoscale_func: Optional[Callable[[Any, ValueType], Any]] = None
                                      ) -> 'PayoffMatrixCell':
        """Build one cell from the summaries relevant to each participant."""
        def _account_cell(participant: Account) -> PayoffMatrixAccountCell:
            # Keep only the summaries that carry data for this participant.
            relevant = [
                summary for summary in aggregation_summary_list
                if summary.get(participant) is not None
            ]
            return PayoffMatrixAccountCell.from_aggregation_summary_list(
                aggregation_summary_list=relevant,
                account=participant,
                autoscale_func=autoscale_func
            )

        return PayoffMatrixCell(
            seller_cell=_account_cell(seller),
            buyer_cell=_account_cell(buyer)
        )
class PayoffMatrix(object):
    """2x2 payoff matrix: honest/malicious seller vs. honest/malicious buyer.

    Cell naming: first letter is the seller's behavior, second the buyer's
    (h = honest, m = malicious).  __str__ renders the matrix as a Unicode
    box-drawing table with checkmark/cross headers.
    """

    def __init__(self, seller: Account, buyer: Account, cell_hh: PayoffMatrixCell, cell_hm: PayoffMatrixCell,
                 cell_mh: PayoffMatrixCell, cell_mm: PayoffMatrixCell) -> None:
        self._seller = seller
        self._buyer = buyer
        self._cell_hh = cell_hh
        self._cell_hm = cell_hm
        self._cell_mh = cell_mh
        self._cell_mm = cell_mm

    def __str__(self) -> str:
        """Render the full matrix including row/column behavior headers."""
        # Left half: seller honest (buyer honest row on top, malicious below).
        seller_honest_tbl_half_str = str_block_table(
            blocks=[
                [str(self._cell_hh.seller_cell), str(self._cell_hh.buyer_cell)],
                [str(self._cell_hm.seller_cell), str(self._cell_hm.buyer_cell)]
            ],
            column_separator=' │ ',
            line_crossing='─┼─',
            row_separator='─'
        )
        # Right half: seller malicious.
        seller_malicious_tbl_half_str = str_block_table(
            blocks=[
                [str(self._cell_mh.seller_cell), str(self._cell_mh.buyer_cell)],
                [str(self._cell_mm.seller_cell), str(self._cell_mm.buyer_cell)]
            ],
            column_separator=' │ ',
            line_crossing='─┼─',
            row_separator='─'
        )
        values_tbl_str = str_block_table(
            blocks=[[seller_honest_tbl_half_str, seller_malicious_tbl_half_str]],
            column_separator=' ║ ',
            row_separator='─'
        )
        prefix_width = len(self._buyer.name) + 2 # +2 for ' ✓' and ' ✗'
        values_width = len(values_tbl_str.split('\n')[0])
        values_height = len(values_tbl_str.split('\n'))
        # NOTE(review): true division yields a float here (Python 3); it is
        # only consumed via ceil() below — confirm heights match the py2-era
        # integer-division layout if this was ever ported.
        value_row_height = (values_height - 1) / 2
        symbol_height = ceil(value_row_height / 2) - 1
        buyer_str_height = ceil(values_height / 2) - 1
        seller_honest_tbl_half_width = len(seller_honest_tbl_half_str.split('\n')[0])
        seller_malicious_tbl_half_width = len(seller_malicious_tbl_half_str.split('\n')[0])
        # Left-hand column: buyer name with ✓ (honest) and ✗ (malicious) rows.
        buyer_tbl_str = (
            ('\n' * symbol_height)
            + ' ' * len(self._buyer.name)
            + ' ✓'
            + ('\n' * (buyer_str_height - symbol_height))
            + self._buyer.name + ' \n'
            + ('\n' * symbol_height)
            + ' ' * len(self._buyer.name)
            + ' ✗'
        )
        # puzzle together the output parts
        output = (
            (' ' * prefix_width)
            + ' ║ '
            + self._seller.name.center(values_width)
        )
        output += (
            '\n'
            + (' ' * prefix_width)
            + ' ║ '
            + '✓'.center(seller_honest_tbl_half_width)
            + ' '
            + '✗'.center(seller_malicious_tbl_half_width)
        )
        output += (
            '\n'
            + ('═' * prefix_width)
            + '═╬═'
            + ('═' * values_width)
        )
        output += '\n' + str_block_table(
            blocks=[[buyer_tbl_str, values_tbl_str]],
            column_separator=' ║ '
        )
        return output

    @staticmethod
    def from_simulation_result(simulation_result: SimulationResult,
                               autoscale_func: Optional[Callable[[Any, ValueType], Any]] = None) -> 'PayoffMatrix':
        """Bucket every final node by honesty of each party and build the matrix."""
        # initialize aggregation summary lists
        asl_hh = []
        asl_hm = []
        asl_mh = []
        asl_mm = []
        for final_node in simulation_result.execution_result_root.final_nodes:
            seller_honest = final_node.account_completely_honest(simulation_result.seller)
            buyer_honest = final_node.account_completely_honest(simulation_result.buyer)
            if seller_honest and buyer_honest:
                asl_hh.append(final_node.aggregation_summary)
            elif seller_honest and not buyer_honest:
                asl_hm.append(final_node.aggregation_summary)
            elif not seller_honest and buyer_honest:
                asl_mh.append(final_node.aggregation_summary)
            elif not seller_honest and not buyer_honest:
                asl_mm.append(final_node.aggregation_summary)
            else:
                # Defensive: the four branches above cover all combinations.
                raise RuntimeError('Should be impossible to reach here! Please contact developers.')
        return PayoffMatrix(
            seller=simulation_result.seller,
            buyer=simulation_result.buyer,
            cell_hh=PayoffMatrixCell.from_aggregation_summary_list(
                aggregation_summary_list=asl_hh,
                seller=simulation_result.seller,
                buyer=simulation_result.buyer,
                autoscale_func=autoscale_func
            ),
            cell_hm=PayoffMatrixCell.from_aggregation_summary_list(
                aggregation_summary_list=asl_hm,
                seller=simulation_result.seller,
                buyer=simulation_result.buyer,
                autoscale_func=autoscale_func
            ),
            cell_mh=PayoffMatrixCell.from_aggregation_summary_list(
                aggregation_summary_list=asl_mh,
                seller=simulation_result.seller,
                buyer=simulation_result.buyer,
                autoscale_func=autoscale_func
            ),
            cell_mm=PayoffMatrixCell.from_aggregation_summary_list(
                aggregation_summary_list=asl_mm,
                seller=simulation_result.seller,
                buyer=simulation_result.buyer,
                autoscale_func=autoscale_func
            )
        )
class PayoffMatrixRenderer(Renderer):
    """Renderer emitting a simulation's payoff matrix as UTF-8 text."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    def render(self, simulation_result: SimulationResult) -> bytes:
        """Render the payoff-matrix table followed by a trailing newline."""
        matrix = PayoffMatrix.from_simulation_result(
            simulation_result=simulation_result,
            autoscale_func=self.autoscale
        )
        return ('%s\n' % matrix).encode('utf-8')
# Make this renderer selectable under the CLI name 'payoff-matrix'.
RendererManager.register('payoff-matrix', PayoffMatrixRenderer)
|
class Node:
    """Singly-linked list node holding `data` and a reference to the next node."""

    def __init__(self, data=None, next_node=None):
        self.data = data
        self.next_node = next_node

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next_node

    def set_next(self, new_node):
        self.next_node = new_node


class Queue:
    """FIFO queue backed by a singly-linked list of Nodes.

    enqueue is O(n) (walks to the tail each time); dequeue is O(1).
    """

    def __init__(self, head=None):
        self.head = head

    def enqueue(self, data):
        """Append `data` at the tail of the queue."""
        new_item = Node(data)
        current = self.head
        if current is None:
            self.head = new_item
        else:
            while current.get_next():
                current = current.get_next()
            current.set_next(new_item)

    def dequeue(self):
        """Remove and return the front item.

        Fixed: the removed value was previously discarded (always returned
        None).  On an empty queue the original notice is still printed and
        None is returned.
        """
        current = self.head
        if current is not None:
            self.head = current.get_next()
            return current.get_data()
        print("Queue is empty")
        return None

    def size(self):
        """Number of items in the queue (O(n) walk)."""
        current = self.head
        count = 0
        while current:
            count += 1
            current = current.get_next()
        return count

    def print_queue(self):
        """Return the queue contents, front to back, as a list."""
        current = self.head
        items = []
        while current:
            items.append(current.get_data())
            current = current.get_next()
        return items

    def is_empty(self):
        """Print the original notice and additionally return a boolean."""
        if self.head is None:
            print("Queue is empty!")
            return True
        print("Queue isn't empty!")
        return False
|
# Copyright (c) 2013 The SAYCBridge Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core.position import *
from core.call import Call
from suit import SUITS
import copy
import math
import operator
# I'm not sure this needs to be its own class.
class Vulnerability(object):
    """Which partnership(s) are vulnerable for a board.

    Stored as one of the display strings 'E-W', 'N-S', 'None' or 'Both'.
    """
    def __init__(self, name):
        # FIXME: We should find a better storage system than strings.
        self.name = name or 'None'
        assert self.name in ('E-W', 'N-S', 'None', 'Both'), "%s is not a valid vulnerability" % self.name

    # Two-character identifiers used in URL-style history identifiers.
    name_to_identifier = { 'E-W': 'EW', 'N-S': 'NS', 'None': 'NO', 'Both': 'BO' }
    identifier_to_name = dict([(identifier, name) for name, identifier in name_to_identifier.items()])

    @property
    def identifier(self):
        return self.name_to_identifier[self.name]

    @classmethod
    def from_identifier(cls, identifier):
        return Vulnerability(cls.identifier_to_name[identifier])

    @classmethod
    def from_string(cls, string):
        return Vulnerability(string)

    def gib_name(self):
        """Single-character vulnerability code used by the GIB engine."""
        return { 'E-W': 'e', 'N-S': 'n', 'None': '-', 'Both': 'b' }[self.name]

    @classmethod
    def from_board_number(cls, board_number):
        # Fixed: first parameter of this classmethod renamed self -> cls.
        # http://www.jazclass.aust.com/bridge/scoring/score11.htm
        # FIXME: There must be a more compact way to represent this series.
        number_to_vulnerability = {
            0: 'E-W', # board 16
            1: 'None',
            2: 'N-S',
            3: 'E-W',
            4: 'Both',
            5: 'N-S',
            6: 'E-W',
            7: 'Both',
            8: 'None',
            9: 'E-W',
            10: 'Both',
            11: 'None',
            12: 'N-S',
            13: 'Both',
            14: 'None',
            15: 'N-S',
        }
        return Vulnerability(number_to_vulnerability[board_number % 16])

    def is_vulnerable(self, position):
        """True if `position` (object with a .char of N/E/S/W) is on a vulnerable side."""
        if self.name == "None":
            return False
        if self.name == "Both":
            return True
        return position.char in self.name
# FIXME: It's unclear if this class should expose just call_names or Call objects.
class CallHistory(object):
    """The ordered record of calls in an auction, plus dealer and vulnerability.

    Positions are derived from the dealer and a call's offset into the list.
    """
    @classmethod
    def _calls_from_calls_string(cls, calls_string):
        """Parse a space- or comma-separated string of call names into Call objects."""
        if not calls_string:
            return []
        if ',' in calls_string:
            calls_string = calls_string.replace(',', ' ')
        calls_string = calls_string.strip()  # Remove any trailing whitespace.
        call_names = calls_string.split(' ')
        # This if exists to support string == ''
        if not call_names or not call_names[0]:
            return []
        # from_string may be more forgiving than we want...
        # Fixed: a list comprehension instead of map() — under Python 3 map()
        # returns an iterator which the membership assert below would exhaust,
        # leaving the caller with an empty sequence of calls.
        calls = [Call.from_string(call_name) for call_name in call_names]
        assert None not in calls, "Failed to parse calls string: '%s'" % calls_string
        return calls

    @classmethod
    def from_string(cls, history_string, dealer_char=None, vulnerability_string=None):
        dealer = Position.from_char(dealer_char) if dealer_char else None
        vulnerability = Vulnerability.from_string(vulnerability_string)
        calls = cls._calls_from_calls_string(history_string)
        return CallHistory(calls, dealer=dealer, vulnerability=vulnerability)

    @classmethod
    def dealer_from_board_number(cls, board_number):
        # It's unclear if this number->dealer/vulnerability knowledge belongs in CallHistory or in Board.
        dealer_index = (board_number + 3) % 4
        return Position.from_index(dealer_index)

    @classmethod
    def from_board_number_and_calls_string(cls, board_number, calls_string):
        vulnerability = Vulnerability.from_board_number(board_number)
        dealer = cls.dealer_from_board_number(board_number)
        calls = cls._calls_from_calls_string(calls_string)
        return CallHistory(calls=calls, dealer=dealer, vulnerability=vulnerability)

    @classmethod
    def empty_for_board_number(cls, board_number):
        return cls.from_board_number_and_calls_string(board_number, '')

    def __init__(self, calls=None, dealer=None, vulnerability=None):
        self.calls = calls or []
        self.dealer = dealer or NORTH
        self.vulnerability = vulnerability or Vulnerability.from_board_number(1)

    def __str__(self):
        return self.calls_string()

    def __len__(self):
        return len(self.calls)

    def can_double(self):
        """True when the opponents own the current (undoubled) contract."""
        # Make sure we haven't already doubled.  Guard added: with no
        # non-pass call yet, last_non_pass() is None (was an AttributeError).
        last_non_pass = self.last_non_pass()
        if not last_non_pass or not last_non_pass.is_contract():
            return False
        return not self.declarer().in_partnership_with(self.position_to_call())

    def can_redouble(self):
        """True when our side's contract was just doubled."""
        # Guard added: last_non_pass() may be None on an empty auction.
        last_non_pass = self.last_non_pass()
        if not last_non_pass or not last_non_pass.is_double():
            return False
        return self.declarer().in_partnership_with(self.position_to_call())

    # This may belong on a separate bridge-rules object?
    def is_legal_call(self, call):
        """True if `call` may legally be made in the current auction."""
        assert not self.is_complete()
        if call.is_pass():
            return True
        last_contract = self.last_contract()
        if not last_contract:
            return not call.is_double() and not call.is_redouble()
        # Doubles do not have levels.
        if call.level:
            if last_contract.level > call.level:
                return False
            if last_contract.level == call.level and last_contract.strain >= call.strain:
                return False
        if call.is_double() and not self.can_double():
            return False
        if call.is_redouble() and not self.can_redouble():
            return False
        return True

    def copy_appending_call(self, call):
        assert call
        assert self.is_legal_call(call)
        new_call_history = copy.deepcopy(self)
        new_call_history.calls.append(call)
        return new_call_history

    def copy_with_partial_history(self, last_entry):
        # Shallow copy: only `calls` is replaced; dealer/vulnerability shared.
        partial_history = copy.copy(self)
        partial_history.calls = self.calls[:last_entry]
        return partial_history

    def ascending_partial_histories(self, step):
        """Partial histories, shortest first, shrinking by `step` calls each time."""
        partial_histories = []
        partial_history = self
        while partial_history.calls:  # We only terminate from here if passed in an empty history.
            partial_histories.insert(0, partial_history)
            if len(partial_history.calls) < step:
                break
            partial_history = partial_history.copy_with_partial_history(-step)
        return partial_histories

    @property
    def identifier(self):
        """Compact "D:VU:calls" form, the inverse of from_identifier()."""
        return "%s:%s:%s" % (self.dealer.char, self.vulnerability.identifier, self.comma_separated_calls())

    @classmethod
    def from_identifier(cls, identifier):
        components = identifier.split(":")
        if len(components) == 3:
            dealer_char, vulnerability_identifier, calls_identifier = components
        elif len(components) == 2:
            # It's very common to have the last colon in the URL missing.
            dealer_char, vulnerability_identifier = components
            calls_identifier = ""
        else:
            assert False, "Invalid history identifier: %s" % identifier
        dealer = Position.from_char(dealer_char)
        vulnerability = Vulnerability.from_identifier(vulnerability_identifier)
        calls = cls._calls_from_calls_string(calls_identifier)
        return CallHistory(calls=calls, dealer=dealer, vulnerability=vulnerability)

    def pretty_one_line(self):
        return "Deal: %s, Bids: %s" % (self.dealer.char, self.calls_string())

    def calls_string(self):
        return " ".join([call.name for call in self.calls])

    def comma_separated_calls(self):
        return ",".join([call.name for call in self.calls])

    @property
    def last_call(self):
        if not self.calls:
            return None
        return self.calls[-1]

    @property
    def last_to_call(self):
        if not self.calls:
            return None
        return self.dealer.position_after_n_calls(len(self.calls) - 1)

    def last_non_pass(self):
        """The most recent non-pass call, or None."""
        for call in reversed(self.calls):
            if not call.is_pass():
                return call
        return None

    def last_to_not_pass(self):
        """The position that made the most recent non-pass call, or None."""
        for caller, call in self.enumerate_reversed_calls():
            if not call.is_pass():
                return caller
        return None

    def last_contract(self):
        for call in reversed(self.calls):
            if call.is_contract():
                return call
        return None

    def position_to_call(self):
        # FIXME: Should this return None when is_complete?
        # We'd have to check callers, some may assume it's OK to call position_to_call after is_complete.
        return self.dealer.position_after_n_calls(len(self.calls))

    def calls_by(self, position):
        """All calls made by `position`, in bidding order."""
        offset_from_dealer = self.dealer.calls_between(position)
        if len(self.calls) <= offset_from_dealer:
            return []
        return [self.calls[i] for i in range(offset_from_dealer, len(self.calls), 4)]

    def enumerate_calls(self):
        """Yield (position, call) pairs in bidding order."""
        for call_offset, call in enumerate(self.calls):
            yield self.dealer.position_after_n_calls(call_offset), call

    def enumerate_reversed_calls(self):
        """Yield (position, call) pairs, most recent first."""
        # FIXME: This is needlessly complicated.
        for call_offset, call in enumerate(reversed(self.calls)):
            caller_offset = len(self.calls) - 1 - call_offset
            yield self.dealer.position_after_n_calls(caller_offset), call

    def competative_auction(self):
        """True when both partnerships have made a contract bid.  (sic: name kept for callers)"""
        first_caller = None
        for caller, call in self.enumerate_calls():
            if not first_caller and call.is_contract():
                first_caller = caller
            if call.is_contract() and not caller.in_partnership_with(first_caller):
                return True
        return False

    def last_call_by(self, position):
        calls = self.calls_by(position)
        if not calls:
            return None
        return calls[-1]

    def first_call_by(self, position):
        calls = self.calls_by(position)
        if not calls:
            return None
        return calls[0]

    def last_call_by_next_bidder(self):
        next_caller = self.position_to_call()
        return self.last_call_by(next_caller)

    def opener(self):
        """The position that made the first contract bid, or None."""
        for caller, call in self.enumerate_calls():
            if call.is_contract():
                return caller
        return None

    def declarer(self):
        """The player who first bid the final contract's strain for the
        declaring partnership, or None if no contract has been bid."""
        first_caller = None
        first_call = None
        last_caller = None
        last_call = None
        for caller, call in self.enumerate_reversed_calls():
            if not call.is_contract():
                continue
            if not last_call:
                last_call = call
                last_caller = caller
            if call.strain == last_call.strain and caller.in_partnership_with(last_caller):
                first_call = call
                first_caller = caller
        return first_caller

    def dummy(self):
        """Declarer's partner, or None when there is no declarer yet."""
        # Fixed: previously referenced an undefined global `declarer`
        # (NameError at runtime) instead of calling self.declarer().
        declarer = self.declarer()
        return declarer.partner if declarer else None

    def contract(self):
        # Maybe we need a Contract object which holds declarer, suit, level, and doubles?
        last_contract = self.last_contract()
        if last_contract:
            last_non_pass = self.last_non_pass()
            double_string = ''
            if last_non_pass.is_double():
                double_string = 'X'
            elif last_non_pass.is_redouble():
                double_string = 'XX'
            return "%s%s" % (last_contract.name, double_string)
        return None

    def is_complete(self):
        """An auction ends after at least four calls with three closing passes."""
        return len(self.calls) > 3 and self.calls[-1].is_pass() and self.calls[-2].is_pass() and self.calls[-3].is_pass()

    def is_passout(self):
        return self.is_complete() and self.calls[-4].is_pass()
|
import numpy as np
import os
# Load the Stockholm daily-temperature dataset located next to this script.
data = np.genfromtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'stockholm_td_adj.tsv'))
# Column 1 holds the month number; select February rows.
mask_feb = data[:,1] == 2
# The temperature is in column 3
np.mean(data[mask_feb,3]) # -3.2121095707365961
# We can even plot it
import matplotlib.pyplot as plt
months = np.arange(1,13)
# Mean temperature (column 3) for each month of the year.
monthly_mean = [np.mean(data[data[:,1] == month, 3]) for month in months]
fig, ax = plt.subplots()
ax.bar(months, monthly_mean)
ax.set_xlabel("Month")
ax.set_ylabel("Monthly avg. temp.");
plt.show()
from typing import List
from sqlmodel import Session
from models.permission import Permission, PermissionCreate, PermissionRead, PermissionUpdate
from models.user import UserSession
from services.base import BaseService
from helpers.exceptions import ApiException
from repositories.permission import PermissionRepository
class PermissionService(BaseService):
    """CRUD service for Permission records; storage is delegated to the repository."""

    def __init__(self, user_session: UserSession, db: Session):
        super().__init__(user_session, db)
        self.permission_repository = PermissionRepository(db)

    async def get_permission(self, permission_id) -> PermissionRead:
        """Fetch one permission by id; raises ApiException when absent or on failure."""
        try:
            found = self.permission_repository.get_entity(
                Permission.id == permission_id)
            if found is None:
                raise ApiException(
                    f'Permission with id {permission_id} not found')
            return found
        except ApiException as e:
            raise e
        except Exception:
            raise ApiException(f'Permission with id {permission_id} not found')

    async def get_permissions(self) -> List[PermissionRead]:
        """Fetch all permissions."""
        try:
            return self.permission_repository.get_entities()
        except Exception:
            raise ApiException('No permissions found')

    async def create_permission(self, permission: PermissionCreate) -> PermissionRead:
        """Create a permission, rejecting duplicate names."""
        try:
            duplicate = self.permission_repository.get_entity(
                Permission.name == permission.name)
            if duplicate is not None:
                raise ApiException('Permission already exists')
            return self.permission_repository.create_entity(permission)
        except ApiException as e:
            raise e
        except Exception:
            raise ApiException('Error creating permission')

    async def update_permission(self, permission_id, permission: PermissionUpdate) -> PermissionRead:
        """Update a permission; a renamed permission must not collide with an existing name."""
        try:
            if permission.name is not None:
                duplicate = self.permission_repository.get_entity(
                    Permission.name == permission.name)
                if duplicate is not None:
                    raise ApiException('Permission already exists')
            return self.permission_repository.update_entity(
                permission, Permission.id == permission_id)
        except ApiException as e:
            raise e
        except Exception:
            raise ApiException(
                f'Error updating permission with id {permission_id}')

    async def delete_permission(self, permission_id) -> None:
        """Delete a permission; raises ApiException when the delete fails."""
        try:
            deleted = self.permission_repository.delete_entity(
                Permission.id == permission_id)
            if not deleted:
                raise ApiException(
                    f'Error deleting permission with id {permission_id}')
        except ApiException as e:
            raise e
        except Exception:
            raise ApiException(
                f'Error deleting permission with id {permission_id}')
|
#!/usr/bin/env python3
"""Smoke test: fetch the HPE CP license, delete the first key, then re-fetch."""
from hpecp import ContainerPlatformClient
import time

client = ContainerPlatformClient(username='admin',
                                 password='admin123',
                                 api_host='127.0.0.1',
                                 api_port=8080,
                                 use_ssl=True,
                                 verify_ssl='/certs/hpecp-ca-cert.pem')
client.create_session()
# Renamed from `license` so the builtin of that name is not shadowed.
license_info = client.license.get_license()
print(license_info)
print(client.license.delete_license(license_info['Licenses'][0]['LicenseKey']))
time.sleep(5)  # Give the platform time to process the deletion.
license_info = client.license.get_license()
print(license_info)
|
import sys
from queue import PriorityQueue

# Dijkstra's shortest paths from start node k over a directed graph read
# from stdin: first line "v e", then k, then e lines of "a b c".
v, e = map(int, sys.stdin.readline().split())
k = int(sys.stdin.readline())
inf = float('inf')
# Adjacency lists for edge targets.  A v*v matrix would need ~1.6GB for
# 20000 nodes, and parallel edges with differing costs must be preserved.
graph = [[] for _ in range(v)]
cost = [[] for _ in range(v)]  # Edge costs, parallel to `graph`.
for _ in range(e):
    a, b, c = map(int, sys.stdin.readline().split())
    graph[a-1].append(b-1)  # Edge target (0-based).
    cost[a-1].append(c)  # Edge cost.
pq = PriorityQueue()
pq.put((0, k-1))  # Seed the queue with the start node at distance 0.
res = [inf for _ in range(v)]  # Best known distance per node.
res[k-1] = 0  # Distance of the start node is 0.
while pq.qsize():  # Until the priority queue is drained.
    dist, node = pq.get()
    if res[node] < dist:  # Stale entry: a shorter path was already recorded.
        continue
    for i, j in zip(graph[node], cost[node]):  # Each outgoing edge (target, cost).
        if res[i] > j + dist:
            res[i] = j + dist  # Relax the edge.
            pq.put((res[i], i))
for i in res:
    # Fixed: compare by value, not identity — `is not inf` relied on CPython
    # interning of the single `inf` object and breaks for any other inf value.
    if i != inf:
        print(i)
    else:
        print('INF')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Helpers for checking if possible to move forward to next page."""
# pylint: disable=invalid-name,broad-except
import math
import re
def get_pages(pagination, verbose=False):
    """Get current page#, page# of the displayed listings and all pages.

    Returns [current_page_num, curr_page_list, all_page_nums].
    """
    # Total listings: the left pagination text's last token is the total.
    total_listings_element = pagination.find_element_by_xpath(
        './/div[@class="search_pagination_left"]'
    )
    # Fixed: reuse this element's text instead of re-querying the DOM for
    # the same node a second time.
    showing = total_listings_element.text
    total_listings = int(showing.split()[-1])
    # Maximum page of listings (25 listings per page)
    max_page_listings = math.ceil(total_listings / 25)
    # Listing range currently shown; drop the trailing total count.
    showing_listings = list(map(int, re.findall(r"\d+", showing)))[:-1]
    # Current page
    curr_page_list = [
        int(showing_listing / 25) for showing_listing in showing_listings
    ]
    if 0 in curr_page_list:
        current_page_num = 1
    else:
        # NOTE(review): raises ValueError if no numbers were parsed — the
        # pagination text is assumed to always contain a range; confirm.
        current_page_num = max(curr_page_list)
    # Get all page numbers (except the current page number)
    all_page_nums = []
    nav_elements = pagination.find_elements_by_xpath(
        './/div[@class="search_pagination_right"]/a'
    )
    for nav_element in nav_elements:
        if nav_element.text not in ["<", ">"]:
            all_page_nums.append(int(nav_element.text))
    all_page_nums = list(
        set(all_page_nums + [current_page_num]) - set([max_page_listings])
    )
    all_page_nums = sorted(all_page_nums)
    if verbose:
        all_page_nums_str = ",".join([str(p) for p in all_page_nums])
        print(
            f"Available page numbers={all_page_nums_str}, "
            f"Current page={current_page_num}, Max page={max(all_page_nums)}"
        )
    return [current_page_num, curr_page_list, all_page_nums]
def check_movement(pagination):
    """Check for ability to navigate backward or forward between pages.

    Returns [can_move_back, can_move_forward].
    """
    pagination_movements = pagination.find_element_by_xpath(
        './/div[@class="search_pagination_right"]'
    ).find_elements_by_class_name("pagebtn")
    # Check for ability to move back.  Fixed: the original drove this with
    # `assert`, which is stripped under `python -O`; an explicit comparison
    # is used instead.  The broad except is kept so a missing or stale
    # element still just means "can not move".
    try:
        can_move_back = pagination_movements[0].text == "<"
    except Exception:
        can_move_back = False
    if can_move_back:
        print("Can move back, ", end="")
    else:
        print("Can not move back, ", end="")
    # Check for ability to move forward
    try:
        can_move_forward = pagination_movements[-1].text == ">"
    except Exception:
        can_move_forward = False
    if can_move_forward:
        print("Can move forward")
    else:
        print("Can not move forward, ", end="")
    return [can_move_back, can_move_forward]
|
from vit.formatter.entry import Entry
class EntryRemaining(Entry):
    """Formatter column that renders a task's 'entry' timestamp via remaining()."""
    def format(self, entry, task):
        # Delegates to remaining() inherited from Entry — presumably formats
        # the time remaining relative to `entry`; confirm against the base class.
        return self.remaining(entry)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/position.proto
# NOTE: machine-generated module — change proto/position.proto and
# regenerate instead of hand-editing anything below.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# Serialized FileDescriptor for proto/position.proto; all message and
# service descriptors below are unpacked views into it.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='proto/position.proto',
  package='posXYZ',
  syntax='proto3',
  serialized_options=_b('Z\010posXYZpb'),
  serialized_pb=_b('\n\x14proto/position.proto\x12\x06posXYZ\"&\n\x03Pos\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\"F\n\x08Position\x12\x1d\n\x08position\x18\x01 \x01(\x0b\x32\x0b.posXYZ.Pos\x12\x0e\n\x06status\x18\x02 \x01(\x05\x12\x0b\n\x03msg\x18\x03 \x01(\t\"\"\n\x03Msg\x12\x0e\n\x06status\x18\x01 \x01(\x05\x12\x0b\n\x03msg\x18\x02 \x01(\t2p\n\x0ePositinService\x12.\n\x0bPositionReq\x12\x0b.posXYZ.Msg\x1a\x10.posXYZ.Position\"\x00\x12.\n\x0bPositionPub\x12\x10.posXYZ.Position\x1a\x0b.posXYZ.Msg\"\x00\x42\nZ\x08posXYZpbb\x06proto3')
)
# Message descriptor for Pos: three float-defaulted fields x, y, z.
_POS = _descriptor.Descriptor(
  name='Pos',
  full_name='posXYZ.Pos',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='x', full_name='posXYZ.Pos.x', index=0,
      number=1, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='y', full_name='posXYZ.Pos.y', index=1,
      number=2, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='z', full_name='posXYZ.Pos.z', index=2,
      number=3, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=32,
  serialized_end=70,
)
# Message descriptor for Position: a Pos submessage plus status/msg fields.
_POSITION = _descriptor.Descriptor(
  name='Position',
  full_name='posXYZ.Position',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='position', full_name='posXYZ.Position.position', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='status', full_name='posXYZ.Position.status', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='msg', full_name='posXYZ.Position.msg', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=72,
  serialized_end=142,
)
# Message descriptor for Msg: status (int default 0) and msg (string) fields.
_MSG = _descriptor.Descriptor(
  name='Msg',
  full_name='posXYZ.Msg',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='status', full_name='posXYZ.Msg.status', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='msg', full_name='posXYZ.Msg.msg', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=144,
  serialized_end=178,
)
_POSITION.fields_by_name['position'].message_type = _POS
DESCRIPTOR.message_types_by_name['Pos'] = _POS
DESCRIPTOR.message_types_by_name['Position'] = _POSITION
DESCRIPTOR.message_types_by_name['Msg'] = _MSG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes generated from the descriptors above.
Pos = _reflection.GeneratedProtocolMessageType('Pos', (_message.Message,), {
  'DESCRIPTOR' : _POS,
  '__module__' : 'proto.position_pb2'
  # @@protoc_insertion_point(class_scope:posXYZ.Pos)
  })
_sym_db.RegisterMessage(Pos)
Position = _reflection.GeneratedProtocolMessageType('Position', (_message.Message,), {
  'DESCRIPTOR' : _POSITION,
  '__module__' : 'proto.position_pb2'
  # @@protoc_insertion_point(class_scope:posXYZ.Position)
  })
_sym_db.RegisterMessage(Position)
Msg = _reflection.GeneratedProtocolMessageType('Msg', (_message.Message,), {
  'DESCRIPTOR' : _MSG,
  '__module__' : 'proto.position_pb2'
  # @@protoc_insertion_point(class_scope:posXYZ.Msg)
  })
_sym_db.RegisterMessage(Msg)
DESCRIPTOR._options = None
# Service descriptor: PositinService (sic, name comes from the .proto) with
# PositionReq(Msg) -> Position and PositionPub(Position) -> Msg.
_POSITINSERVICE = _descriptor.ServiceDescriptor(
  name='PositinService',
  full_name='posXYZ.PositinService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=180,
  serialized_end=292,
  methods=[
  _descriptor.MethodDescriptor(
    name='PositionReq',
    full_name='posXYZ.PositinService.PositionReq',
    index=0,
    containing_service=None,
    input_type=_MSG,
    output_type=_POSITION,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='PositionPub',
    full_name='posXYZ.PositinService.PositionPub',
    index=1,
    containing_service=None,
    input_type=_POSITION,
    output_type=_MSG,
    serialized_options=None,
  ),
])
_sym_db.RegisterServiceDescriptor(_POSITINSERVICE)
DESCRIPTOR.services_by_name['PositinService'] = _POSITINSERVICE
# @@protoc_insertion_point(module_scope)
|
"""
Generic has no implementation in Python
""" |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Initial implementation of The Matrix
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import traceback
import Axon
import time
from Axon.AxonExceptions import ServiceAlreadyExists
from Axon.CoordinatingAssistantTracker import coordinatingassistanttracker as CAT
from Axon.ThreadedComponent import threadedcomponent
from Kamaelia.Util.Splitter import PlugSplitter as Splitter
from Kamaelia.Util.Splitter import Plug
from Kamaelia.Util.PipelineComponent import pipeline
from Kamaelia.Util.Graphline import Graphline
from Kamaelia.Util.ConsoleEcho import consoleEchoer
from Kamaelia.SingleServer import SingleServer
from Kamaelia.Internet.TCPClient import TCPClient
from Kamaelia.ReadFileAdaptor import ReadFileAdaptor
from Kamaelia.File.Writing import SimpleFileWriter
from Kamaelia.Visualisation.PhysicsGraph.lines_to_tokenlists import lines_to_tokenlists
import sys
# Command-line port configuration: argv[1..5] override the defaults.
# Fixed: mockserverport, musicport and controlport all re-read sys.argv[2]
# (copy-paste bug); each port now reads its own positional argument, with
# the length checks adjusted to match.
if len(sys.argv) > 1:
    dj1port = int(sys.argv[1])
else:
    dj1port = 1701
if len(sys.argv) > 2:
    dj2port = int(sys.argv[2])
else:
    dj2port = 1702
if len(sys.argv) > 3:
    mockserverport = int(sys.argv[3])
else:
    mockserverport = 1700
if len(sys.argv) > 4:
    musicport = int(sys.argv[4])
else:
    musicport = 1703
if len(sys.argv) > 5:
    controlport = int(sys.argv[5])
else:
    controlport = 1705
class ConsoleReader(threadedcomponent):
    """Threaded component: reads stdin lines and sends them to "outbox"."""
    def __init__(self, prompt=">>> "):
        super(ConsoleReader, self).__init__()
        self.prompt = prompt
    def run(self):
        # Blocking read loop; safe here because this runs on its own thread.
        while 1:
            line = raw_input(self.prompt)
            line = line + "\n"  # Re-append the newline raw_input strips.
            self.send(line, "outbox")
class Backplane(Axon.Component.component):
    """Named publish/subscribe hub backed by a Splitter.

    Registers two CAT services: "Backplane_I_<name>" (the splitter's inbox,
    used by publishTo) and "Backplane_O_<name>" (its configuration box,
    used by subscribeTo to plug in).
    """
    def __init__(self, name):
        super(Backplane,self).__init__()
        # Guard: the backplane name must be a plain string.
        assert name == str(name)
        self.name = name
        self.splitter = Splitter().activate()
    def main(self):
        splitter = self.splitter
        cat = CAT.getcat()
        try:
            cat.registerService("Backplane_I_"+self.name, splitter, "inbox")
            cat.registerService("Backplane_O_"+self.name, splitter, "configuration")
        except Axon.AxonExceptions.ServiceAlreadyExists, e:
            # A second backplane with the same name was created: report loudly
            # and re-raise, since the services cannot be registered twice.
            print "***************************** ERROR *****************************"
            print "An attempt to make a second backplane with the same name happened."
            print "This is incorrect usage."
            print
            traceback.print_exc(3)
            print "***************************** ERROR *****************************"
            raise e
        # Nothing further to do once registered; sleep until shutdown.
        while 1:
            self.pause()
            yield 1
class message_source(Axon.Component.component):
    """Test helper: emits the string "message" on "outbox" roughly once per
    scheduler time unit (busy-loops, yielding every cycle)."""
    def main(self):
        last = self.scheduler.time
        while 1:
            yield 1
            # Send once each time more than one scheduler time unit elapses.
            if self.scheduler.time - last > 1:
                self.send("message", "outbox")#
                last = self.scheduler.time
class echoer(Axon.Component.component):
    """Debug sink: prints every message arriving on "inbox" together with the
    component id and a running message count."""
    def main(self):
        count = 0
        while 1:
            # Sleep until woken by a delivery, then drain the inbox.
            self.pause()
            yield 1
            while self.dataReady("inbox"):
                data = self.recv("inbox")
                print "echoer #",self.id,":", data, "count:", count
                count = count +1
class publishTo(Axon.Component.component):
    """Forwards everything arriving on its "inbox" into the named backplane,
    via a passthrough link to the backplane's registered inbox service."""
    def __init__(self, destination):
        super(publishTo, self).__init__()
        # Name of the target Backplane.
        self.destination = destination
    def main(self):
        cat = CAT.getcat()
        service = cat.retrieveService("Backplane_I_"+self.destination)
        # passthrough=1: deliveries to our inbox are relayed straight to the
        # service without this generator having to touch them.
        self.link((self,"inbox"), service, passthrough=1)
        while 1:
            self.pause()
            yield 1
class subscribeTo(Axon.Component.component):
    """Plugs into the named backplane and re-emits everything published on it
    via this component's "outbox"."""
    def __init__(self, source):
        super(subscribeTo, self).__init__()
        # Name of the Backplane to subscribe to.
        self.source = source
    def main(self):
        cat = CAT.getcat()
        splitter,configbox = cat.retrieveService("Backplane_O_"+self.source)
        # Plug attaches us as an output of the backplane's splitter.
        Plug(splitter, self).activate()
        while 1:
            # Busy loop (no pause()) relaying received data to our outbox.
            while self.dataReady("inbox"):
                d = self.recv("inbox")
                self.send(d, "outbox")
            yield 1
class MatrixMixer(Axon.Component.component):
debug = 0
Inboxes = ["inbox", "control", "DJ1", "DJ2","music","mixcontrol"]
Outboxes = ["outbox", "signal", "mixcontrolresponse"]
def main(self):
self.dj1_active = 1
self.dj2_active = 1
self.music_active = 1
source_DJ1 = subscribeTo("DJ1").activate()
source_DJ2 = subscribeTo("DJ2").activate()
source_music = subscribeTo("music").activate()
self.link((source_DJ1, "outbox"), (self, "DJ1"))
self.link((source_DJ2, "outbox"), (self, "DJ2"))
self.link((source_music, "outbox"), (self, "music"))
data_dj1 = []
data_dj2 = []
data_music = []
count = 0
while 1:
self.pause()
yield 1
while self.dataReady("DJ1"):
data_dj1.append(self.recv("DJ1"))
while self.dataReady("DJ2"):
data_dj2.append(self.recv("DJ2"))
while self.dataReady("music"):
data_music.append(self.recv("music"))
while self.dataReady("mixcontrol"):
command = self.recv("mixcontrol")
result = self.handleCommand(command)+"\n" # Response always ends with newline
print "RESPONSE TO COMMAND", repr(result)
self.send(result, "mixcontrolresponse")
# Only bother mixing if the sources are active
if self.dj1_active or self.dj2_active or self.music_active:
mix_args = [[],[],[]] # Mixer function expects 3 sources
if self.dj1_active:
mix_args[0]= data_dj1
if self.dj2_active:
mix_args[1]= data_dj2
if self.music_active:
mix_args[2]= data_music
if len(data_dj1) > 0 or len(data_dj2) > 0 or len(data_music) > 0:
X = time.time()
data = self.mix(mix_args)
# sys.stderr.write("mixtime ="+str( time.time() - X)+"\n")
self.send(data, "outbox")
data_dj1 = []
data_dj2 = []
data_music = []
if self.debug and (len(data_dj1) or len(data_dj2) or len(data_music)):
print self.id, "echoer #1",self.id,":", data_dj1, "count:", count
print self.id, " #2",self.id,":", data_dj2, "count:", count
count = count +1
def handleCommand(self, command):
print "COMMAND RECEIVED:", repr(command)
if len(command)>0:
command[0] = command[0].upper()
if command[0] == "SWITCH":
if len(command) !=4: return "FAIL"
command, dest, source, flag = command
command.upper()
dest.upper()
source.upper()
flag.upper()
if flag == "ONLY":
if source == "DJ1":
self.dj1_active, self.dj2_active, self.music_active = (1,0,0)
return "OK"
elif source == "DJ2":
self.dj1_active, self.dj2_active, self.music_active = (0,1,0)
return "OK"
elif source == "PRERECORD":
self.dj1_active, self.dj2_active, self.music_active = (0,0,1)
return "OK"
elif source == "ALL":
self.dj1_active, self.dj2_active, self.music_active = (1,1,1)
return "OK"
elif flag == "ON" or flag == "OFF":
if flag == "ON":
value = 1
else:
value = 0
if source == "DJ1":
self.dj1_active = value
return "OK"
elif source == "DJ2":
self.dj2_active = value
return "OK"
elif source == "PRERECORD":
self.music_active = value
return "OK"
elif source == "ALL":
self.dj1_active, self.dj2_active, self.music_active = (value,value,value)
return "OK"
if command[0] == "QUERY":
if len(command) !=3: return "FAIL"
command, dest, source = command
command.upper(), dest.upper(), source.upper()
if source == "DJ1":
if self.dj1_active:
return "ON"
else:
return "OFF"
elif source == "DJ2":
if self.dj2_active:
return "ON"
else:
return "OFF"
elif source == "PRERECORD":
if self.music_active:
return "ON"
else:
return "OFF"
elif source == "ALL":
result = []
if self.dj1_active: result.append("ON")
else: result.append("OFF")
if self.dj2_active: result.append("ON")
else: result.append("OFF")
if self.music_active: result.append("ON")
else: result.append("OFF")
return " ".join(result)
return "FAIL"
def mix(self, sources):
""" This is a correct, but very slow simple 2 source mixer """
# sys.stderr.write("sourcelen:"+str( [ len(s) for s in sources] )+"\n")
def char_to_ord(char):
raw = ord(char)
if raw >128:
return (-256 + raw)
else:
return raw
def ord_to_char(raw):
if raw <0:
result = 256 + raw
else:
result = raw
return chr(result)
raw_dj1 = "".join(sources[0])
raw_dj2 = "".join(sources[1])
raw_music = "".join(sources[2])
len_dj1 = len(raw_dj1)
len_dj2 = len(raw_dj2)
len_music = len(raw_music)
packet_size = max( len_dj1, len_dj2, len_music )
pad_dj1 = "\0"*(packet_size-len_dj1)
pad_dj2 = "\0"*(packet_size-len_dj2)
pad_music = "\0"*(packet_size-len_music)
raw_dj1 = raw_dj1 + pad_dj1
raw_dj2 = raw_dj2 + pad_dj2
raw_music = raw_music + pad_music
result = []
try:
for i in xrange(0, packet_size,2):
lsb2 = ord(raw_dj2[i])
msb2 = ord(raw_dj2[i+1])
twos_complement_X = (msb2 << 8) + lsb2
if twos_complement_X > 32767:
valuefrom2 = -65536 + twos_complement_X
else:
valuefrom2 = twos_complement_X
lsb1 = ord(raw_dj1[i])
msb1 = ord(raw_dj1[i+1])
twos_complement_X = (msb1 << 8) + lsb1
if twos_complement_X > 32767:
valuefrom1 = -65536 + twos_complement_X
else:
valuefrom1 = twos_complement_X
lsbmusic = ord(raw_music[i])
msbmusic = ord(raw_music[i+1])
twos_complement_X = (msbmusic << 8) + lsbmusic
if twos_complement_X > 32767:
valuefrommusic = -65536 + twos_complement_X
else:
valuefrommusic = twos_complement_X
mixed = (valuefrom2+valuefrom1+valuefrommusic) /3
if mixed < 0:
mixed = 65536 + mixed
mixed_lsb= mixed %256
mixed_msb= mixed >>8
result.append(chr(mixed_lsb))
result.append(chr(mixed_msb))
except IndexError:
print "WARNING: odd (not even) packet size"
return "".join(result)
# Shared audio buses; "destination" is created but unused below.
Backplane("DJ1").activate()
Backplane("DJ2").activate()
Backplane("music").activate()
Backplane("destination").activate()
# One single-client TCP server per source feeds its backplane.
pipeline(
    SingleServer(port=dj1port),
    publishTo("DJ1"),
).activate()
pipeline(
    SingleServer(port=dj2port),
    publishTo("DJ2"),
).activate()
pipeline(
    SingleServer(port=musicport),
    publishTo("music"),
).activate()
# Feature switches for the wiring below.
livecontrol = 1
networkserve = 0
standalone = 1
# File-playback bitrate (bits/sec) for the disabled standalone mode below.
datarate = 1536000
class printer(Axon.Component.component):
    """Writes anything arriving on "inbox" straight to stdout, flushing
    after every chunk (at most one chunk per scheduler cycle)."""
    def main(self):
        while 1:
            if self.dataReady("inbox"):
                sys.stdout.write(self.recv("inbox"))
                sys.stdout.flush()
            yield 1
#if standalone:
# Dead code (disabled): when enabled, feeds the three source ports from raw
# audio files on disk so the mixer can be exercised standalone.
if 0:
    networkserve = 0
    pipeline(
        ReadFileAdaptor("audio.1.raw", chunkrate=1000, readmode="bitrate", bitrate=datarate),
        TCPClient("127.0.0.1", dj1port),
    ).activate()
    pipeline(
        ReadFileAdaptor("audio.2.raw", chunkrate=1000, readmode="bitrate", bitrate=datarate),
        TCPClient("127.0.0.1", dj2port),
    ).activate()
    pipeline(
        ReadFileAdaptor("audio.2.raw", chunkrate=1000, readmode="bitrate", bitrate=datarate),
        TCPClient("127.0.0.1", musicport),
    ).activate()
# Where the final mix goes: served over TCP, or dumped to stdout.
if networkserve:
    audiencemix = SingleServer(port=mockserverport)
else:
    audiencemix = printer() # SimpleFileWriter("bingle.raw")
if livecontrol:
    # Mixer controlled remotely over TCP (controlport).  .run() starts the
    # scheduler, so nothing after this Graphline executes until shutdown.
    Graphline(
        CONTROL = SingleServer(port=controlport),
        TOKENISER = lines_to_tokenlists(),
        MIXER = MatrixMixer(),
        AUDIENCEMIX = audiencemix,
        linkages = {
            ("CONTROL" , "outbox") : ("TOKENISER" , "inbox"),
            ("TOKENISER" , "outbox") : ("MIXER" , "mixcontrol"),
            ("MIXER" , "mixcontrolresponse") : ("CONTROL" , "inbox"),
            ("MIXER", "outbox") : ("AUDIENCEMIX", "inbox"),
        }
    ).run()
else:
    # Mixer controlled interactively from the console.
    Graphline(
        CONTROL = ConsoleReader("mixer desk >> "),
        CONTROL_ = consoleEchoer(),
        TOKENISER = lines_to_tokenlists(),
        MIXER = MatrixMixer(),
        AUDIENCEMIX = audiencemix,
        linkages = {
            ("CONTROL" , "outbox") : ("TOKENISER" , "inbox"),
            ("TOKENISER" , "outbox") : ("MIXER" , "mixcontrol"),
            ("MIXER" , "mixcontrolresponse") : ("CONTROL_" , "inbox"),
            ("MIXER", "outbox") : ("AUDIENCEMIX", "inbox"),
        }
    ).run()
# NOTE(review): everything below only executes after the scheduler above
# exits; it appears to be leftover debugging wiring kept for reference.
if 0:
    audienceout = pipeline(
        MatrixMixer(),
        SingleServer(port=mockserverport)
    ).run()
    def dumping_server():
        return pipeline(
            SingleServer(mockserverport),
            printer(),
        )
    dumping_server().run()
# Command line mixer control
commandlineMixer = Graphline(
    TOKENISER = lines_to_tokenlists(),
    MIXER = MatrixMixer(),
    FILE = SimpleFileWriter("bingle.raw"),
    linkages = {
        ("USER" , "outbox") : ("TOKENISER" , "inbox"),
        ("TOKENISER" , "outbox") : ("MIXER" , "mixcontrol"),
        ("MIXER" , "mixcontrolresponse") : ("USERRESPONSE" , "inbox"),
        ("MIXER", "outbox") : ("FILE", "inbox"),
    }
).run()
# TCP Client sending
audienceout = pipeline(
    MatrixMixer(),
    # SimpleFileWriter("bingle.raw"),
    TCPClient("127.0.0.1", mockserverport)
).run()
#).activate()
# NOTE(review): redefinition of dumping_server from the disabled block above;
# returns an unactivated pipeline echoing the mock server port.
def dumping_server():
    return pipeline(
        SingleServer(mockserverport),
        printer(),
    )
# Controller mix
####MatrixMixer().run()
#
# Bunch of code used when debugging various bits of code.
#
#
pipeline(
    ReadFileAdaptor("audio.1.raw", readsize="60024"), #readmode="bitrate", bitrate =16000000),
    publishTo("DJ1"),
).activate()
pipeline(
    ReadFileAdaptor("audio.2.raw", readsize="60024"), #readmode="bitrate", bitrate =16000000),
    publishTo("DJ2"),
).activate()
audienceout = pipeline(
    MatrixMixer(),
    ### TCPClient("127.0.0.1", mockserverport)
    SimpleFileWriter("bingle.raw"),
).run()
###activate()
|
from flask import Flask, request
from implement2D import Calc
from numpy import random
from pprint import pprint
import webbrowser
import json
app = Flask(__name__)
def checkContours(cItem):
    """Map one contour entry: the literal 'isolado' (isolated boundary)
    becomes None; anything else is parsed as a float."""
    return None if cItem == 'isolado' else float(cItem)
@app.route("/")
def root():
    """Serve the front-end page from the static folder."""
    # NOTE(review): the path starts with './'; Flask's send_static_file
    # normally takes a path relative to the static folder ('index.html') --
    # confirm this resolves as intended.
    return app.send_static_file('./index.html')
@app.route("/calc", methods=['POST'])
def calc():
    """Run the 2-D calculation described by the posted JSON body.

    Expected JSON keys: minutes, alpha, l, dx, dy, dt and ci (contour list,
    where the string 'isolado' marks an isolated boundary -> None).
    Side effects: prints the Calc object's state and displays the result via
    Calc.show().  Returns the plain string "ok".
    """
    data = json.loads(request.data)
    # print(data)
    ci = list(map(checkContours, data["ci"]))
    # ci = data["ci"].map(lambda x: int(x))
    c = Calc(int(data["minutes"]), float(data["alpha"]), float(data["l"]), float(data["dx"]),
             float(data["dy"]), float(data["dt"]), ci)
    pprint(vars(c))
    c.calculate()
    c.show()
    return "ok"
# Browser command template -- pick the line matching the host OS.
# MacOS
chrome_path = 'open -a /Applications/Google\ Chrome.app %s'
# Windows
# chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
# Linux
# chrome_path = '/usr/bin/google-chrome %s'
# Open the UI in Chrome, then start Flask in the foreground (reloader off so
# the browser is only opened once).
webbrowser.get(chrome_path).open("http://localhost:5000")
app.run(host="0.0.0.0", port=5000, debug=False,
        threaded=False, use_reloader=False)
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.views.generic.base import TemplateView
class DeviceManagementView(TemplateView):
    """Render the static device-management page."""
    template_name = "device_management.html"
|
# Hangman
import random # Needed to get a random word from the list of words
from functions import split # Needed to store functions in a separate file
# Error message for when the user enters an invalid number
invalidNumber = "Your number must be an integer equal to or greater than 1."
# List of words
words = ["dog", "cat", "human", "teacher"]
# Outer loop: one pass per play session -- re-prompts for the number of
# chances whenever a session ends.
while True:
    try:
        # Number of wrong guesses the player is allowed.
        chances = int(
            input(
                "How many chances do you want? Please reply with an integer equal to or greater than 1. "
            )
        )
    except ValueError:
        print(invalidNumber)
        continue
    if chances < 1:
        print(invalidNumber)
        continue
    while chances != 0:
        # Defines the variables for the hangman
        selectedWord = random.choice(
            words
        )  # Picks a random word from the words list
        splitWord = split(selectedWord)  # Splits that word into single characters
        blankWord = "_" * len(
            selectedWord
        )  # Prints as many "_" as there are characters for the selected word
        splitBlankWord = split(
            blankWord
        )  # Splits the blank word into single characters
        # Guessing loop: runs until the word is revealed or chances run out.
        while chances != 0 and splitBlankWord != splitWord:
            guess = input(f"Guess a letter: {splitBlankWord}")
            if guess in splitWord:
                # Reveal every position where the guessed letter occurs.
                for i in range(len(splitWord)):
                    if splitWord[i] != guess:
                        continue
                    splitBlankWord[i] = guess
            else:
                # Wrong guess costs one chance.
                print(f"Incorrect guess. {guess} is not a character in the word. ")
                chances = chances - 1
        if splitBlankWord == splitWord:
            print(splitBlankWord)
            print(f"You have guessed the correct word! The word was \"{selectedWord}\"")
            # Win: offer a rematch; "y" ends this session (chances = 0) so the
            # outer loop prompts for a fresh chance count, "n" exits.
            while True:
                playAgain = str.lower(input('Would you like to play again? (Y/n): '))
                if playAgain == "y":
                    chances = 0
                    break
                elif playAgain == "n":
                    print('Thanks for playing! Terminating program...')
                    quit()
                else:
                    print('Input not recognized.')
                    continue
|
import javabridge
import matplotlib
matplotlib.use("module://backend_swing")
import matplotlib.figure
import backend_swing
import numpy as np
import threading
import matplotlib.pyplot as plt
def popup_script_dlg(canvas):
    """Prompt for a Python expression via a Swing dialog and evaluate it.

    The expression is evaluated with `canvas` and `axes` (the figure's first
    Axes) available in scope, then the canvas is redrawn.
    """
    joptionpane = javabridge.JClassWrapper("javax.swing.JOptionPane")
    jresult = joptionpane.showInputDialog(
        "Enter a script command")
    if jresult is not None:
        result = javabridge.to_string(jresult)
        axes = canvas.figure.axes[0]
        # SECURITY: eval() of arbitrary user input -- tolerable only because
        # this is a local interactive debugging tool.
        eval(result, globals(), locals())
        canvas.draw()
def run_ui():
    """Build a demo sine-wave figure on the Swing backend and show it.

    Returns (frame, canvas, toolbar) so the caller can attach listeners to
    the top-level Swing frame.
    """
    figure = plt.figure()
    ax = figure.add_axes([.05, .05, .9, .9])
    x = np.linspace(0, np.pi * 8)
    ax.plot(x, np.sin(x))
    canvas = figure.canvas
    frame = canvas.component.getTopLevelAncestor()
    toolbar = plt.get_current_fig_manager().frame.toolbar
    # Toolbar button opening the script dialog against this canvas.
    toolbar.add_button(lambda event:popup_script_dlg(canvas), "hand")
    plt.show()
    return frame, canvas, toolbar
javabridge.start_vm()
javabridge.activate_awt()
event = threading.Event()
event_ref_id, event_ref = javabridge.create_jref(event)
cpython = javabridge.JClassWrapper('org.cellprofiler.javabridge.CPython')()
set_event_script = (
'import javabridge\n'
'event = javabridge.redeem_jref("%s")\n'
'event.set()') % event_ref_id
adapter = javabridge.run_script("""
new java.awt.event.WindowAdapter() {
windowClosed: function(e) {
cpython.exec(script);
}
}
""", dict(cpython=cpython, script=set_event_script))
frame, canvas, toolbar = run_ui()
frame.addWindowListener(adapter)
frame.setVisible(True)
event.wait()
javabridge.kill_vm()
|
import time
import stacktrain.config.general as conf
import stacktrain.batch_for_windows as wbatch
# -----------------------------------------------------------------------------
# Conditional sleeping
# -----------------------------------------------------------------------------
def conditional_sleep(seconds):
    """Sleep for *seconds* only during a real build.

    When generating a Windows batch script instead (conf.wbatch), the delay
    is recorded in the batch file rather than actually waited out here.
    """
    # Don't sleep if we are just faking it for wbatch
    if conf.do_build:
        time.sleep(seconds)
    if conf.wbatch:
        wbatch.wbatch_sleep(seconds)
|
from flask import Flask
from flask_restful import Api, Resource
app = Flask(__name__)
api = Api(app)
# Toy in-memory "database" served by the resource below.
names = {"tim": {"age": 19, "gender":"male"},
        "bill": {"age": 70, "gender":"male"}}
class HelloWorld(Resource):
    """GET /helloworld/<name>: return the stored record for *name*."""
    def get(self, name):
        # NOTE(review): unknown names raise KeyError (-> HTTP 500); consider
        # flask_restful.abort(404) if a clean 404 is desired.
        return names[name]
api.add_resource(HelloWorld, "/helloworld/<string:name>")
# Development server entry point.
if __name__=="__main__":
    app.run(debug=True)
|
import random
from typing import Tuple, List
import PySimpleGUI as sg
from omegaconf import DictConfig
import openFACS
# Normalised (0..1) activation vectors for prototypical facial expressions.
# Each list holds 18 values, one per supported Action Unit, in the order
# AU 1,2,4,5,6,7,9,10,12,14,15,17,20,23,25,26,28,45 (matching the GUI sliders).
PROTOTYPICAL_EXPRESSIONS_NORMALIZED: dict = {
    "anger": [0.0, 0.0, 1.0, 0.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6, 0.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    "contempt": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
    "disgust": [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.4, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    "fear": [0.6, 0.0, 0.6, 0.0, 0.0, 0.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6, 0.6, 0.6, 0.6, 0.0, 0.0],
    "happiness": [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.6, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
    "neutral": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    "sadness": [1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    "shock": [0.6, 0.6, 0.4, 0.8, 0.0, 0.4, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4, 0.0, 0.0, 0.4, 0.0, 0.0],
    "surprise": [0.6, 0.6, 0.0, 0.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6, 0.0, 0.0]
}
# Length of every AU vector above.
NUMBER_OF_ACTION_UNITS: int = 18
class OpenFacsControllerGUI(object):
    """PySimpleGUI control panel for driving an openFACS avatar.

    Presents one slider per supported Action Unit plus a prototype-expression
    dropdown and a speed box; every interaction sends the resulting AU vector
    (and animation speed) through the supplied ``OpenFacsInterface``.
    """

    # (slider key, descriptive label) per supported AU, in the canonical
    # order used by PROTOTYPICAL_EXPRESSIONS_NORMALIZED / openFACS.
    # Replaces 18 hand-written, near-identical layout rows and accessors.
    _AU_SLIDERS = [
        ("SAU1", "inner brow raiser"),
        ("SAU2", "outer brow raiser"),
        ("SAU4", "brow lowerer"),
        ("SAU5", "upper lid raiser"),
        ("SAU6", "cheek raiser"),
        ("SAU7", "lid tightener"),
        ("SAU9", "nose wrinklener"),
        ("SAU10", "upper lip raiser"),
        ("SAU12", "lip corner puller"),
        ("SAU14", "dimpler"),
        ("SAU15", "lip corner depressor"),
        ("SAU17", "chin raiser"),
        ("SAU20", "lip stretcher"),
        ("SAU23", "lip tightener"),
        ("SAU25", "lips part"),
        ("SAU26", "jaw drop"),
        ("SAU28", "lip suck"),
        ("SAU45", "blink"),
    ]

    def __init__(self, open_facs_interface: openFACS.OpenFacsInterface, slider_maxval: float) -> None:
        # Interface over which AU vectors are sent (UDP under the hood).
        self._open_facs_interface: openFACS.OpenFacsInterface = open_facs_interface
        # Upper bound of every AU slider.
        self._slider_maxval: float = slider_maxval
        self._window = sg.Window(
            title="OpenFACS Controller",
            layout=self._get_layout(slider_maxval),
            resizable=True
        )

    @staticmethod
    def _make_au_row(key: str, label: str, slider_maxval: float) -> List:
        """One layout row: 'AU n:' caption, slider keyed *key*, label text."""
        return [
            sg.Text(f"AU {key[3:]}: ", size=(6, 1), font=("Helvetica", 12), justification="c"),
            sg.Slider(
                range=(0, slider_maxval),
                default_value=0,
                resolution=0.1,
                size=(20, 15),
                orientation="horizontal",
                font=("Helvetica", 12),
                enable_events=True,
                key=key
            ),
            sg.Text(label, size=(10, 2), font=("Helvetica", 9), justification="c"),
        ]

    @staticmethod
    def _get_layout(slider_maxval: float) -> List:
        """Full window layout: expression dropdown, speed input, one row per
        AU slider, status lines and control buttons."""
        layout = [
            [sg.Combo(
                list(PROTOTYPICAL_EXPRESSIONS_NORMALIZED.keys()),
                size=(40, 4),
                enable_events=True,
                key="dropdown")
            ],
            [sg.Text("Speed", size=(10, 1)),
             sg.InputText(default_text="1.0", key="speed_input", enable_events=True, size=(5, 1))
            ],
        ]
        layout += [
            OpenFacsControllerGUI._make_au_row(key, label, slider_maxval)
            for key, label in OpenFacsControllerGUI._AU_SLIDERS
        ]
        layout += [
            [sg.Text(size=(50, 1), key="error_box")],
            [sg.Text(size=(50, 1), key="output")],
            [sg.Checkbox("Enable interactive mode:", default=False, key="interactive")],
            [sg.Button("Send"), sg.Button("Reset"), sg.Button("Random"), sg.Button("Quit")]
        ]
        return layout

    @staticmethod
    def read_slider_values(values) -> List[float]:
        """Current slider positions as an AU list in canonical order."""
        return [values[key] for key, _ in OpenFacsControllerGUI._AU_SLIDERS]

    def read_speed(self, values) -> float:
        """Parse the speed input box; fall back to 1.0 (and say so) when the
        entered text is not a valid float."""
        speed_in = values["speed_input"]
        try:
            speed = float(speed_in)
        except ValueError:
            self._window["error_box"].update("Inserted speed value is not valid! Using 1.0")
            print("WARNING: Inserted speed value is not valid! Using 1.0")
            speed_in = "1.0"
            self._window["speed_input"].update(speed_in)
            speed = float(speed_in)
        return speed

    def set_slider_values(self, au_list: List[float]) -> None:
        """Push *au_list* (canonical AU order) back onto the sliders."""
        for (key, _), value in zip(self._AU_SLIDERS, au_list):
            self._window[key].update(value)

    def set_speed(self, speed_value: float) -> None:
        """Show *speed_value* in the speed input box."""
        self._window["speed_input"].update(str(speed_value))

    def reset(self) -> Tuple[List[float], float]:
        """Zero all sliders, restore speed 1.0 and return (aus, speed)."""
        aus = [0.0] * NUMBER_OF_ACTION_UNITS
        speed = 1.0
        self.set_speed(speed_value=speed)
        self.set_slider_values(au_list=aus)
        return aus, speed

    def generate_random_aus(self) -> List[float]:
        """
        Generates from a lognormal(0, 1), clipped to the slider maximum,
        and pushes the values onto the sliders.
        """
        random_au_values = [
            min(random.lognormvariate(0, 1), self._slider_maxval)
            for _ in range(NUMBER_OF_ACTION_UNITS)
        ]
        self.set_slider_values(au_list=random_au_values)
        return random_au_values

    def run(self) -> None:
        """Event loop: dispatch window events until Quit or window close."""
        au_keys = {key for key, _ in self._AU_SLIDERS}
        while True:
            event, values = self._window.read()
            # Empty all text previously written.
            self._window["output"].update("")
            self._window["error_box"].update("")
            # See if user wants to quit or window was closed.
            if event == sg.WINDOW_CLOSED or event == "Quit":
                aus, speed = self.reset()
                self._window["output"].update("Successfully reset all values")
                self._open_facs_interface.send_aus(au_list=aus, speed=speed)
                break
            elif event == "Send":
                aus = self.read_slider_values(values)
                speed = self.read_speed(values)
                self._open_facs_interface.send_aus(au_list=aus, speed=speed)
                self._window["output"].update("Message sent successfully!")
            elif event == "Reset":
                aus, speed = self.reset()
                self._window["output"].update("Successfully reset all values")
                self._open_facs_interface.send_aus(au_list=aus, speed=speed)
            elif event == "Random":
                aus = self.generate_random_aus()
                speed = self.read_speed(values)
                self._open_facs_interface.send_aus(au_list=aus, speed=speed)
                self._window["output"].update("Successfully generated random values")
            # Slider moved while interactive mode is enabled: send immediately.
            elif event in au_keys and values["interactive"] is True:
                aus = self.read_slider_values(values)
                speed = self.read_speed(values)
                self._open_facs_interface.send_aus(au_list=aus, speed=speed)
            elif event == "dropdown":
                # Prototype expression chosen: scale its normalised vector to
                # the slider range, display and send it.
                dropdown_value: str = values["dropdown"]
                normalized_aus: List[float] = PROTOTYPICAL_EXPRESSIONS_NORMALIZED[dropdown_value]
                aus = [au * self._slider_maxval for au in normalized_aus]
                speed = self.read_speed(values)
                self.set_slider_values(au_list=aus)
                self._open_facs_interface.send_aus(au_list=aus, speed=speed)
                self._window["output"].update("Message sent successfully!")
        # Finish up by removing from the screen.
        self._window.close()
def main() -> None:
    """Wire an OpenFacsInterface to the controller GUI and run it."""
    # openFACS listens for AU datagrams on localhost:5000.
    UDP_IP_ADDRESS = "127.0.0.1"
    UDP_PORT_NO = 5000
    # Upper bound of every slider; prototype expressions are scaled to this.
    SLIDER_MAXVAL = 5
    open_facs_interface = openFACS.OpenFacsInterface(
        udp_ip_address=UDP_IP_ADDRESS,
        udp_port=UDP_PORT_NO
    )
    open_facs_controller = OpenFacsControllerGUI(
        open_facs_interface=open_facs_interface,
        slider_maxval=SLIDER_MAXVAL
    )
    open_facs_controller.run()
if __name__ == "__main__":
    main()
|
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
from models.topic import MultiTopicModel
from others.vocab_wrapper import VocabWrapper
from others.id_wrapper import VocIDWrapper
class Model(nn.Module):
    """Topic-model wrapper: builds a MultiTopicModel over word embeddings
    (pretrained word2vec/GloVe, or randomly initialised) and returns its
    loss for a batch of bag-of-words tensors."""
    def __init__(self, args, device, vocab, checkpoint=None):
        super(Model, self).__init__()
        self.args = args
        self.device = device
        # Id <-> word2vec-row mapping (not referenced elsewhere in this class).
        self.voc_id_wrapper = VocIDWrapper('pretrain_emb/id_word2vec.voc.txt')
        # Topic Model
        # Using Golve or Word2vec embedding
        if self.args.tokenize:
            self.voc_wrapper = VocabWrapper(self.args.word_emb_mode)
            self.voc_wrapper.load_emb(self.args.word_emb_path)
            self.voc_emb = torch.tensor(self.voc_wrapper.get_emb())
        else:
            # NOTE(review): self.vocab_size is never assigned anywhere in this
            # class, so this branch raises AttributeError; it presumably should
            # be derived from the `vocab` argument (which is otherwise unused).
            self.voc_emb = torch.empty(self.vocab_size, self.args.word_emb_size)
            xavier_uniform_(self.voc_emb)
        # self.voc_emb.weight = copy.deepcopy(self.encoder.model.embeddings.word_embeddings.weight)
        self.topic_model = MultiTopicModel(self.voc_emb.size(0), self.voc_emb.size(-1),
                                           args.topic_num, self.voc_emb, agent=True, cust=True)
        if checkpoint is not None:
            self.load_state_dict(checkpoint['model'], strict=True)
        self.to(device)
    def forward(self, batch):
        """Return the topic-model loss for one batch (expects .all_bow,
        .customer_bow and .agent_bow bag-of-words tensors on *batch*)."""
        all_bow, customer_bow, agent_bow = \
            batch.all_bow, batch.customer_bow, batch.agent_bow
        topic_loss, _ = self.topic_model(all_bow, customer_bow, agent_bow)
        return topic_loss
|
import pandas
def load(path: str) -> pandas.DataFrame:
    """Parse a TREC-style question-classification file into a DataFrame.

    Each line reads '<COARSE>:<fine> <question text>'.  Returns a frame
    with columns 'question', 'fine_category' and 'coarse_category'.
    (Fix: the local previously shadowed the builtin ``input``.)
    """
    rows = []
    with open(path, "r", encoding="latin-1") as data_in:
        for line in data_in:
            # First whitespace-separated token is the fine label, e.g. "DESC:manner".
            fine_category, question_text = line.split(None, 1)
            coarse_category, _ = fine_category.split(":")
            rows.append({
                "question": question_text.strip(),
                "fine_category": fine_category,
                "coarse_category": coarse_category
            })
    return pandas.DataFrame(rows)
|
import sys
from twisted.internet import defer, reactor, error
from twisted.python.failure import Failure
from twisted.python.filepath import FilePath
from twisted.conch.endpoints import SSHCommandClientEndpoint, _CommandChannel
from twisted.internet.protocol import Factory, Protocol
from blinker import signal
from plait.task import Task
from plait.spool import SpoolingSignalProtocol, SpoolingProtocol
from plait.errors import TimeoutError, TaskError
from plait.utils import parse_host_string, QuietConsoleUI, timeout, AttributeString
# default channel does send ext bytes to protocol (stderr)
class WorkerChannel(_CommandChannel):
    """SSH command channel that also forwards extended data (stderr) to the
    protocol, which the stock _CommandChannel does not."""
    def extReceived(self, dataType, data):
        # Delegate only when the protocol implements the optional hook.
        if hasattr(self._protocol, 'extReceived'):
            self._protocol.extReceived(dataType, data)
# endpoint that utilizes channel above
class WorkerEndpoint(SSHCommandClientEndpoint):
    """SSHCommandClientEndpoint variant that opens WorkerChannel channels so
    extended data (stderr) reaches the protocol."""
    # NOTE(review): this class-level Deferred is shared across instances and
    # shadowed by the local below -- it looks like dead code; verify.
    commandConnected = defer.Deferred()
    def _executeCommand(self, connection, protocolFactory):
        # Fires with the connected protocol, or fails if the command dies.
        commandConnected = defer.Deferred()
        def disconnectOnFailure(passthrough):
            # Tear the SSH connection down on failure; cancellation warrants
            # immediate cleanup.
            immediate = passthrough.check(defer.CancelledError)
            self._creator.cleanupConnection(connection, immediate)
            return passthrough
        commandConnected.addErrback(disconnectOnFailure)
        channel = WorkerChannel(
            self._creator, self._command, protocolFactory, commandConnected)
        connection.openChannel(channel)
        return commandConnected
class PlaitWorker(Factory):
"""
Executes a sequence of tasks against a remote host.
When run, an initial SSH connection is established to the remote host.
For efficiency's sake, all subsequent remote operations reuse the
same connection and execute over a new channel.
Each task is executed in a daemon thread which will be killed when the
main thread exits. When the task runs a remote operation it blocks on
a call on the worker inside the main reactor thread where the network
operations are negotiated. The result is then returned to the thread
and it resumes execution.
There are a number of signals emitted for workers:
- timeout : seconds
- fail : failure
- connect : user, host, port
- task_start : task
- task_end : result
- stdout : line
- stderr : line
- complete :
"""
    def __init__(self, tasks, keys, agent, known_hosts, timeout, all_tasks=False):
        # tasks: sequence of (name, func, args, kwargs) executed in order.
        # keys: SSH keys used for authentication.
        # NOTE(review): the `agent` and `known_hosts` arguments are accepted
        # but discarded (both attributes are hard-coded to None below), and
        # makeConnectEndpoint also passes None -- confirm this is intended.
        self.proto = None
        self.host_string = None
        self.user = None
        self.host = None
        self.port = None
        self.tasks = tasks
        self.keys = keys
        self.agent = None
        self.known_hosts = None
        self.timeout = timeout
        self.all_tasks = all_tasks
        self.lines = 0
        # uid -> Task, used to route stdout/stderr signals back to tasks.
        self.tasks_by_uid = dict()
    def __str__(self):
        # Workers are identified by the host string they connected with.
        return self.host_string
    def buildProtocol(self, addr):
        """Factory hook: build the spooling protocol for a new connection."""
        # construct protocol and wire up io signals
        # NOTE(review): __init__ initialises `self.proto`, but this assigns
        # `self.protocol` -- the two names look inconsistent; verify.
        self.protocol = SpoolingSignalProtocol('stdout', 'stderr', sender=self.host_string)
        return self.protocol
    def makeConnectEndpoint(self):
        """
        Endpoint for initial SSH host connection.
        """
        # "cat" is a long-lived, no-op remote command used purely to hold the
        # master connection open; real commands reuse this connection.
        # NOTE(review): agentEndpoint/knownHosts are hard-coded to None here
        # instead of the worker's constructor arguments -- confirm intended.
        return WorkerEndpoint.newConnection(
            reactor, b"cat",
            self.user, self.host, self.port,
            keys=self.keys, agentEndpoint=None,
            knownHosts=None, ui=QuietConsoleUI())
def makeCommandEndpoint(self, command):
"""
Endpoint for remotely executing operations.
"""
return WorkerEndpoint.existingConnection(
self.protocol.transport.conn, command.encode('utf8'))
@defer.inlineCallbacks
def connect(self, host_string):
"""
Establish initial SSH connection to remote host.
"""
self.parse_host_string(host_string)
endpoint = self.makeConnectEndpoint()
yield timeout(self.timeout, endpoint.connect(self))
signal('worker_connect').send(self)
def parse_host_string(self, host_string):
self.host_string = host_string
self.user, self.host, self.port = parse_host_string(host_string)
@property
def label(self):
return "{}@{}".format(self.user, self.host)
def stdout(self, thread_name, data=None):
task = self.tasks_by_uid[thread_name]
task.has_output = True
signal('worker_stdout').send(self, data=data)
def stderr(self, thread_name, data=None):
task = self.tasks_by_uid[thread_name]
task.has_output = True
signal('worker_stderr').send(self, data=data)
def runTask(self, task):
# listen to the output of this task
signal('stdout').connect(self.stdout, sender=task.uid)
signal('stderr').connect(self.stderr, sender=task.uid)
# signal that the task has begun
signal('task_start').send(self, task=task)
return task.run()
@defer.inlineCallbacks
def run(self):
"""
Execute each task in a Task thread.
"""
# execute each task in sequence
for name, func, args, kwargs in self.tasks:
task = Task(self, name, func, args, kwargs)
self.tasks_by_uid[task.uid] = task
result = yield self.runTask(task)
# tasks will return an Exception is there was a failure
if isinstance(result, BaseException):
# wrap it so the runner recognizes this as an expected exception
# and doesn't emit generic worker exception signals
e = TaskError("Task `{}` failed.".format(name))
e.task = task
e.failure = result
raise e
# otherwise it may optionally return a completion value
elif self.all_tasks and not (result or task.has_output):
e = TaskError("Task returned empty result.")
e.task = task
e.failure = e
raise e
else:
signal('task_finish').send(self, task=task, result=result)
@defer.inlineCallbacks
def execFromThread(self, command):
"""
API for tasks to execute ssh commands.
"""
ep = self.makeCommandEndpoint(command)
yield ep.connect(self)
failed = False
try:
yield self.protocol.finished
except error.ProcessTerminated as e:
failed = True
# flush output from proto accumulated during execution
stdout, stderr = self.protocol.flush()
result = AttributeString(stdout)
result.stderr = stderr
result.failed = failed
result.succeeded = not failed
result.command = command
defer.returnValue(result)
|
from mock import MagicMock, patch
from tornado_sqlalchemy_login.utils import (
parse_body,
construct_path,
safe_get,
safe_post,
safe_post_cookies,
)
def foo(*args, **kwargs):
    """Stub callable that always fails with ConnectionRefusedError.

    Used as a mock side_effect to simulate an unreachable server;
    accepts and ignores any arguments.
    """
    raise ConnectionRefusedError
class TestUtils:
    """Unit tests for the tornado_sqlalchemy_login.utils helpers."""

    def test_parse_body(self):
        # parse_body must tolerate both a JSON body and an empty body.
        request = MagicMock()
        for body in ("{}", ""):
            request.body = body
            parse_body(request)

    def test_constructPath(self):
        assert construct_path("test", "test") == "test"

    def test_safe_get(self):
        with patch("requests.get") as mocked:
            mocked.return_value = MagicMock()
            mocked.return_value.text = "{}"
            assert safe_get("test") == {}
            # A refused connection must degrade to an empty dict.
            mocked.side_effect = foo
            assert safe_get("test") == {}

    def test_safe_post(self):
        with patch("requests.post") as mocked:
            mocked.return_value = MagicMock()
            mocked.return_value.text = "{}"
            assert safe_post("test") == {}
            mocked.side_effect = foo
            assert safe_post("test") == {}

    def test_safe_post_cookies(self):
        with patch("requests.post") as mocked:
            mocked.return_value = MagicMock()
            mocked.return_value.text = "{}"
            assert safe_post_cookies("test")[0] == {}
            mocked.side_effect = foo
            assert safe_post_cookies("test")[0] == {}
|
def count_substring(string, sub_string):
    """Count possibly-overlapping occurrences of sub_string in string.

    Unlike str.count, overlapping matches all count, e.g.
    count_substring("aaaa", "aa") == 3.

    :param string: text to search in
    :param sub_string: pattern to look for
    :return: number of start positions at which sub_string occurs;
        0 when sub_string is empty (the original raised IndexError
        via sub_string[0] in that case)
    """
    # Guard the empty-pattern edge case explicitly.
    if not sub_string:
        return 0
    # str.startswith with a start offset checks each candidate position
    # without slicing a temporary copy of the string.
    return sum(
        1
        for start in range(len(string) - len(sub_string) + 1)
        if string.startswith(sub_string, start)
    )
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from osintsan import menu
from osintsan import main1
from plugins.maildb import maildb
from plugins.macaddress import MacAddressLookup
from prompt_toolkit import prompt
from plugins.bear import google
# Banner pages
from module.utils.banner import show_banner
from module.utils import COLORS
from module.utils.ban import page_1
from module.utils.ban import page_2
from module.utils.ban import page_3
from module.utils.ban import page_4
import subprocess
# Imports
import os
import webbrowser
import time as t
import requests
from grab import Grab
from bs4 import BeautifulSoup
import pandas as pd
# Developed by Bafomet
def repl():  # Read-eval-print loop
    """Interactive menu loop: prompt for an option number and dispatch to the
    matching OSINT plugin/module until the user exits.

    Plugin imports happen lazily inside each branch — presumably to keep
    startup fast and avoid importing every plugin's dependencies up front;
    confirm before hoisting them to module level.
    """
    while True:
        print(menu())
        choice = None
        # Inner loop: read and validate the numeric menu option.
        while True:
            try:
                user_input = input(f"{COLORS.REDL} └──>{COLORS.GNSL} Выберите номер опции: {COLORS.ENDL}")
                print()
            except KeyboardInterrupt:
                # Ctrl-C at the prompt exits the REPL entirely.
                return
            if len(user_input) == 0:
                # Empty input: leave choice as None and redraw the menu.
                break
            try:
                choice = int(user_input)
            except ValueError:
                print(f"{COLORS.REDL}Неверный ввод!{COLORS.ENDL}")
            else:
                break
        if choice is None:
            continue
        if choice == 1:
            # Shodan host lookup + Censys IP lookup.
            from plugins.shodan_io import shodan_host, check_shodan_api
            from plugins.censys import censys_ip
            if not check_shodan_api():
                show_banner(clear=True)
                print(f"{COLORS.REDL}API ключ Shodan'а невалиден! (settings.py){COLORS.REDL}")
            else:
                print()
                ip = input(" └──> Введите IP адрес : ")
                show_banner(clear=True)
                shodan_host(ip)
                censys_ip(ip)
        elif choice == 2:
            # Domain scan; only ports 80 and 443 are accepted (default 80).
            from plugins.domain import domain
            host = input(" └──> Введите доменное имя : ")
            port = ""
            while True:
                try:
                    print()
                    port = input(" └──> Нажмите enter, или напишите свой варианта порта : ")
                    port = int(port)
                except ValueError:
                    # Bare enter falls back to port 80; other junk re-prompts.
                    if port == "":
                        port = 80
                    else:
                        continue
                if port not in [80, 443]:
                    print(" Неверный порт ")
                    continue
                else:
                    break
            try:
                domain(host, port)
            finally:
                show_banner(clear=True)
        elif choice == 3:
            # Phone number OSINT: API lookup or phoneradar.ru / po-nomeru.ru scraping.
            os.system('clear')
            print(page_1)
            option = input(f" {COLORS.REDL} └──> {COLORS.WHSL} Выберите вариант запуска: {COLORS.GNSL}")
            if option == "1":
                from plugins.Phonenumber import phone_number, check_phone_api_token
                if not check_phone_api_token():
                    print(f"{COLORS.REDL}phone api невалиден! (settings.py){COLORS.REDL}")
                else:
                    ph = input(f"\n{COLORS.REDL} └──>{COLORS.GNSL} Введи номер телефона с +7: {COLORS.WHSL}")
                    show_banner(clear=True)
                    phone_number(ph)
            elif option == "2":
                g = Grab()
                print(f'\n {COLORS.WHSL}Пример: {COLORS.GNSL}9262063265\n')
                number = input(f" {COLORS.REDL}[ {COLORS.GNSL}+ {COLORS.REDL}] {COLORS.WHSL}Введите номер телефона, без кода страны:{COLORS.GNSL} ")
                g.go('http://phoneradar.ru/phone/' + number)
                try:
                    # Scrape the summary table by fixed row position — brittle
                    # against site layout changes.
                    operator = g.doc.select('//*[@class="table"]/tbody/tr[1]/td[2]').text()
                    region = g.doc.select('//*[@class="table"]/tbody/tr[2]/td[2]').text()
                    sity = g.doc.select('//*[@class="table"]/tbody/tr[3]/td[2]/a').text()
                    search_number = g.doc.select('//*[@class="table"]/tbody/tr[4]/td[2]').text()
                    views_number = g.doc.select('//*[@class="table"]/tbody/tr[5]/td[2]').text()
                    positive_reviews = g.doc.select('//*[@class="table"]/tbody/tr[6]/td[2]').text()
                    negative_reviews = g.doc.select('//*[@class="table"]/tbody/tr[7]/td[2]').text()
                    neutral_reviews = g.doc.select('//*[@class="table"]/tbody/tr[8]/td[2]').text()
                    print(f"\n{COLORS.REDL} Базовые данные о номере:\n")
                    print(f" {COLORS.WHSL}Оператор :{COLORS.GNSL} {operator}")
                    print(f" {COLORS.WHSL}Регион :{COLORS.GNSL} {region}")
                    print(f" {COLORS.WHSL}Город :{COLORS.GNSL} {sity}")
                    print(f" {COLORS.WHSL}Поисков номера :{COLORS.GNSL} {search_number}")
                    print(f" {COLORS.WHSL}Просмотров номера :{COLORS.GNSL} {views_number}")
                    print(f" {COLORS.WHSL}Положительные отзывы:{COLORS.GNSL} {positive_reviews}")
                    print(f" {COLORS.WHSL}Отрицательные отзывы:{COLORS.GNSL} {negative_reviews}")
                    print(f" {COLORS.WHSL}Нейтральные отзывы :{COLORS.GNSL} {neutral_reviews}")
                    print(f" \n{COLORS.WHSL} Обязательно оставляйте отзывы о номере")
                    print(f' {COLORS.WHSL}Нам важен каждый отзыв )')
                    print(f" \n{COLORS.REDL} Комментарии к номеру\n")
                    try:
                        for elem in g.doc.select('//*[@class="card-body"]/div[3]/div'):
                            review = elem.select('div[2]').text()
                            print(f'\n {COLORS.GNSL}[{review}]{COLORS.WHSL}')
                            print(f' {COLORS.REDL}-------------------------------------------------------------------------')
                    except:
                        print(f" {COLORS.WHSL}Отзывов не найдено")
                except:
                    # NOTE(review): bare except hides all scraping errors — confirm intended.
                    print(f' {COLORS.WHSL}Информация не найдена')
                # Second source of reviews: po-nomeru.ru.
                print(f'\n{COLORS.WHSL} Второй уровень комментариев:')
                print(f' {COLORS.REDL}-------------------------------------------------------------------------')
                g.go(f'https://po-nomeru.ru/phone/{number}/')
                try:
                    for elem in g.doc.select('//*[@class="row"]/blockquote'):
                        print(1)  # NOTE(review): looks like leftover debug output — confirm
                        avtor_review = elem.select('h3').text()
                        review = elem.select('p').text()
                        print(f' \n{COLORS.GNSL} [{avtor_review}: {review}]\n')
                        print(f' {COLORS.REDL}-------------------------------------------------------------------------{COLORS.WHSL}')
                        if not avtor_review:
                            print(f" {COLORS.WHSL}Отзывов не найдено\n")
                except:
                    print(f"Нет отзывов!\n")
                # Optionally submit an anonymous review back to po-nomeru.ru.
                print(f' \n Вы желаете оставить отзыв о номере ?\n')
                print(f' {COLORS.REDL}[ {COLORS.GNSL}1 {COLORS.REDL}] - {COLORS.WHSL}Да {COLORS.REDL}[ {COLORS.GNSL}2 {COLORS.REDL}] -{COLORS.WHSL} Нет\n')
                zapros = input(f' {COLORS.WHSL}\n Введите опцию: {COLORS.GNSL}')
                print('')
                if zapros == '1':
                    show_banner(clear=True)
                    print(f' {COLORS.REDL}-------------------------------------------------------------------------')
                    print(f' {COLORS.WHSL}Используйте любое имя, отзыв будет оставлен анонимно')
                    print(f' {COLORS.WHSL}Но я бы на вашем месте не отказался от proxy/vpn')
                    name = input(f'\n {COLORS.WHSL}Введите ваше имя: {COLORS.GNSL}')
                    message = input(f'\n {COLORS.WHSL}Введите ваш отзыв о владельце номера:{COLORS.GNSL} ')
                    rating = input(f'\n {COLORS.GNSL}Выберите рейтинг человека:\n'
                                   f'{COLORS.REDL} [ {COLORS.GNSL}1 {COLORS.REDL}] -{COLORS.WHSL} Положительный, можно звонить и отвечать на звонок.\n'
                                   f'{COLORS.REDL} [ {COLORS.GNSL}2 {COLORS.REDL}] -{COLORS.WHSL} Отрицательный, не отвечать на звонок с этого номера.\n'
                                   f'{COLORS.REDL} [ {COLORS.GNSL}3 {COLORS.REDL}] -{COLORS.WHSL} Нейтральный.\n Введите рейтинг: ')
                    # Mimic the site's AJAX review-submission request.
                    g.setup(headers={'X-Requested-With': 'XMLHttpRequest',
                                     'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                                     'Origin': 'https://po-nomeru.ru'})
                    g.setup(post={'name': name,
                                  'message': message,
                                  'rating': rating,
                                  'number': number,
                                  'action': 'addReview'})
                    g.go('https://po-nomeru.ru/comments/')
                    print(f' \n{COLORS.REDL} Поздравляю !!! Ваш отзыв добавлен. ')
                else:
                    pass
                show_banner(clear=True)
        elif choice == 4:
            from plugins.dnsdump import dnsmap
            print("\n Работает только с (.com .ru)\n")
            dnsmap_inp = input(" └──> Введите url : ")
            show_banner(clear=True)
            dnsmap(dnsmap_inp)
        elif choice == 5:
            # EXIF/GPS metadata extraction from a local photo.
            from plugins.metadata import gps_analyzer
            print("\n Пример пути: /home/bafomet/Desktop/deanon.png\n")
            img_path = input(" └──> Укажите путь до фотографии :")
            show_banner(clear=True)
            gps_analyzer(img_path)
        elif choice == 6:
            from plugins.reverseimagesearch import reverseimagesearch
            print("\n Пример пути: /home/bafomet/Desktop/deanon.png\n")
            img = input(" └──> Укажите путь до фотографии :")
            show_banner(clear=True)
            reverseimagesearch(img)
        elif choice == 7:
            # Honeypot probability check (requires a valid Shodan API key).
            from plugins.shodan_io import check_shodan_api
            from plugins.honeypot import honeypot
            if not check_shodan_api():
                show_banner(clear=True)
                print(f"{COLORS.REDL}`shodan_api` не валиден, поправь в settings.py токен!{COLORS.REDL}")
            else:
                print()
                hp_inp = input(" └──> Введите IP адрес : ")
                show_banner(clear=True)
                honeypot(hp_inp)
        elif choice == 8:
            # NOTE(review): this while-loop always breaks after one pass —
            # effectively a single prompt; confirm the loop is intentional.
            while 1:
                show_banner(clear=True)
                print("")
                mac = prompt(" └──> Введите mac address: ")
                break
            MacAddressLookup(mac)
            continue
        elif choice == 9:
            from module.gui import run_gui
            run_gui()
            show_banner(clear=True)
        elif choice == 10:
            from plugins.torrent import torrent
            ip_ = input(" └──> Введите IP адрес :")
            show_banner(clear=True)
            torrent(ip_)
        elif choice == 11:
            from module.instagram.instagram_search import search_through_instagram
            search_through_instagram()
            show_banner(clear=True)
        elif choice == 12:
            from module.subzone import subzone
            subzone()
            show_banner(clear=True)
        elif choice == 13:
            # Same single-pass loop pattern as option 8.
            while 1:
                print("")
                print(" Пример :google.com")
                print("")
                web = prompt(" └──> Введи домен организации :")
                show_banner(clear=True)
                break
            maildb(web)
            continue
        elif choice == 14:
            # Android debug bridge: installs adb/fastboot system packages first.
            from module import android_debug
            show_banner(clear=True)
            print(f' Проверка и установка зависимостей')
            t.sleep(3)
            os.system("sudo apt-get install android-tools-adb")
            os.system("sudo apt install android-tools-adb android-tools-fastboot")
            t.sleep(5)
            android_debug.android_debug()
            show_banner(clear=True)
        elif choice == 15:
            os.system("cd module;python3 dlc.py -t manual -k start")
            show_banner(clear=True)
        elif choice == 16:
            subprocess.call("cd module;python3 shodan_module.py", shell=True)
            show_banner(clear=True)
        elif choice == 17:
            os.system("cd module;python3 zoom.py")
            show_banner(clear=True)
            # NOTE(review): break exits the REPL entirely after this option,
            # and the continue below is unreachable — confirm intended.
            break
            continue
        elif choice == 18:
            os.system('cd module;python3 identity.py')
            menu()
        elif choice == 19:
            # This is an optional add-on module (cloned from GitHub on demand).
            show_banner(clear=True)
            print(page_4)
            os.system("git clone https://github.com/soxoj/maigret")
        elif choice == 20:
            # Open a set of face/reverse-image search services in the browser.
            urls = [
                "https://search4faces.com",
                "https://findclone.ru",
                "https://images.google.com",
                "https://yandex.ru/images",
                "https://tineye.com",
                "https://pimeyes.com/en",
                "https://carnet.ai",
            ]
            for url in urls:
                webbrowser.open(url)
            show_banner(clear=True)
        elif choice == 21:
            from module.Information_services import information_menu
            information_menu()
            show_banner(clear=True)
        elif choice == 22:
            webbrowser.open("https://canarytokens.org")
            show_banner(clear=True)
        elif choice == 23:
            # Anonymous e-mail providers.
            urls = [
                "https://protonmail.com/ru",
                "https://tutanota.com/ru/blog/posts/anonymous-email",
            ]
            for url in urls:
                webbrowser.open(url)
            show_banner(clear=True)
        elif choice == 24:
            os.system('clear')
            print(page_3)
        elif choice == 25:
            os.system("cd plugins/xss;python2 xss.py")
        elif choice == 26:
            os.system('clear')
            print(page_2)
        elif choice == 27:
            os.system("git clone https://github.com/Bafomet666/osint-info")
            show_banner(clear=True)
        elif choice == 28:
            subprocess.call("sudo maltego", shell=True)
            show_banner(clear=True)
        elif choice == 29:
            # Bitcoin address lookup via walletexplorer.com CSV export.
            show_banner(clear=True)
            print(f"{COLORS.GNSL}-----------------------------------------------------------------------------------")
            bitc_addr = input(f"{COLORS.ENDL} Введите Bitcoin Address:{COLORS.WHSL} ")
            print(f"{COLORS.GNSL}-----------------------------------------------------------------------------------")
            print(f" {COLORS.WHSL}Ожидайте, загрузка страницы\n")
            print(f" {COLORS.WHSL}Полученные данные будут сохранены в папку OSINT-SAN\n {COLORS.GNSL}Если вдруг выйдет ошибка Empty DataFrame, вам нужно будет сменить proxy")
            # NOTE(review): the wallet id in this URL is hard-coded — confirm it
            # is meant to be fixed rather than derived from the address.
            url = 'https://www.walletexplorer.com/wallet/da9f4a0243cbd429?from_address={}'.format(bitc_addr)
            result = requests.get(url)
            soup = BeautifulSoup(result.text.encode('utf-8'), 'lxml')
            if soup.find('div', class_='saveas'):
                # Download the CSV export link and display it as a DataFrame.
                href = soup.find('div', class_='saveas').find('a').get('href')
                file_url = requests.get('https://www.walletexplorer.com' + href)
                with open('btc_result.csv', 'wb') as f:
                    f.write(file_url.content)
                df = pd.read_csv('btc_result.csv')
                pd.options.display.max_columns = len(df.columns)
                print(df)
                print(f"{COLORS.GNSL}-------------------------------------------------------------------------------")
            else:
                print(f'{COLORS.WHSL} Превышен лимит запросов!\n Используй прокси для дальнейшего использования.')
                menu()
                return
        elif choice == 30:
            show_banner(clear=True)
            google()
        elif choice == 66:
            show_banner(clear=True)
        elif choice == 99:
            # Restart the application in a child process.
            os.system('python3 osintsan.py')
        elif choice == 00:
            # 00 == 0: graceful shutdown, killing any spawned sub-processes.
            print(f' {COLORS.GNSL}Благодарим вас за использование !!! Вы прекрасны.\n')
            print(f' Проверяем запущенные под процессы, дайте нам пару секунд')
            t.sleep(3)
            print(f' \n{COLORS.REDL} Закрываем все под процессы\n')
            t.sleep(3)
            os.system('pkill -9 -f osintsan.py')
            exit()
        else:
            # NOTE(review): exit() runs first, so the "unknown option" message
            # below is unreachable — confirm whether exit() belongs here at all.
            exit()
            os.system("clear")
            print(f"{COLORS.REDL} Опции такой нет, дурак!{COLORS.ENDL}")
if __name__ == '__main__':
    # Entry point: run the interactive menu; Ctrl-C just clears the screen.
    try:
        repl()
    except KeyboardInterrupt:
        os.system("clear")
|
from __future__ import annotations
import uvicore
import inspect
from copy import copy
from functools import partial
from uvicore.contracts import Router as RouterInterface
from uvicore.contracts import Package as PackageInterface
from uvicore.contracts import Routes as RoutesInterface
from uvicore.support.module import load
from uvicore.support import str as string
from uvicore.support.dumper import dump, dd
from uvicore.http.routing.guard import Guard
from uvicore.support.dictionary import deep_merge
from uvicore.typing import Dict, Callable, List, TypeVar, Generic, Decorator, Optional, Union
# Generic route type parameter — bound to the concrete Web or Api route class
# by each Router subclass.
R = TypeVar('R')
@uvicore.service()
class Router(Generic[R], RouterInterface[R]):
    """Abstract base router class for Web and Api Router implementations."""
    @property
    def package(self) -> PackageInterface:
        # The package that owns this router.
        return self._package
    @property
    def routes(self) -> Dict[str, R]:
        # All routes registered on this router, keyed by full route name.
        return self._routes
    def __init__(self, package: PackageInterface, prefix: str, name: str = '', controllers: str = None):
        # Instance variables
        self._package = package
        self._routes = Dict()
        self._tmp_routes = Dict()
        self.controllers: str = controllers
        # Prefix: normalized to '' or '/leading-no-trailing-slash' form.
        if prefix == '/': prefix = ''
        if prefix:
            if prefix[-1] == '/': prefix = prefix[0:-1]
            if prefix[0] != '/': prefix = '/' + prefix
        self.prefix = prefix
        # Clean name: slashes become dots, no leading/trailing dots.
        if name:
            name = name.replace('/', '.')
            if name[-1] == '.': name = name[0:-1]  # Remove trailing .
            if name[0] == '.': name = name[1:]  # Remove beginning .
        self.name = name
    def controller(self,
        module: Union[str, Callable],
        *,
        prefix: str = '',
        name: str = '',
        tags: Optional[List[str]] = None,
        options: Dict = {}
    ) -> List:
        """Include a Route Controller.

        Resolves *module* (string path or class), instantiates it, lets it
        register its routes on a child router, merges controller-level
        middleware into each route, then merges everything into this router.
        Returns the controller's routes as a list.
        """
        # Normalize the extra prefix the same way as __init__.
        if prefix:
            if prefix[-1] == '/': prefix = prefix[0:-1]  # Remove trailing /
            if prefix[0] != '/': prefix = '/' + prefix  # Add beginning /
        # Get name
        if not name: name = prefix
        # Clean name
        if name:
            name = name.replace('/', '.')
            if name[-1] == '.': name = name[0:-1]  # Remove trailing .
            if name[0] == '.': name = name[1:]  # Remove beginning .
        # Import controller module from string
        cls = module
        if type(module) == str:
            if self.controllers:
                if '.' not in module:
                    # We are defining just 'home', so we add .Home class
                    module = self.controllers + '.' + module + '.' + string.studly(module)
                elif module[0] == '.':
                    # We are appending the path to self.controllers
                    # Must have class too, ex: .folder.stuff.Stuff
                    module = self.controllers + module
                # elif module.count('.') == 1: # NO, we'll just add a . before, like .home.Home and it does the same thing
                #     # We are defining the file and the class (home.Home)
                #     # Only works with ONE dot. If you want to append use .folder.stuff.Stuff
                #     module = self.controllers + '.' + module
                else:
                    # We are defining the FULL module path even though we have defined a self.controller path
                    # Must have class too, ex: acme.appstub.http.api.stuff.Stuff
                    pass
            # Dynamically import the calculated module
            cls = load(module).object
            if str(type(cls)) == "<class 'module'>":
                # Trying to load a module, but we want the class inside the module, auto add
                module = module + '.' + string.studly(module.split('.')[-1])
                cls = load(module).object
        # Instantiate controller file
        controller: Routes = cls(self.package, **options)
        # New self (Web or Api) router instance
        router = self.__class__(self.package, self.prefix + prefix, self.name + '.' + name, self.controllers)
        # Register controllers routes and return new updated router
        router = controller.register(router)
        # Add controller class level attributes as middleware to each route on this controller
        controller_middlewares = controller._middleware()
        if controller_middlewares:
            for route in router.routes.values():
                (route.middleware, route.endpoint) = self._merge_route_middleware(controller_middlewares, route.middleware, route.endpoint)
        # Merge controllers routes into this main (parent of recursion) router
        #dump(router.routes)
        self.routes.merge(router.routes)
        # Return just this controllers routes as a list
        routes = []
        for route in router.routes.keys():
            # Append .controller(tags=[xyz]) if exists
            if tags:
                if router.routes[route].tags is None: router.routes[route].tags = []
                router.routes[route].tags.extend(tags)
            routes.append(router.routes[route])
        return routes
    def include(self,
        module: Union[str, Callable],
        *,
        prefix: str = '',
        name: str = '',
        tags: Optional[List[str]] = None,
        options: Dict = {}
    ) -> List:
        """Alias to controller"""
        # NOTE(review): the result of self.controller() is not returned despite
        # the declared List return type — include() yields None; confirm.
        self.controller(module=module, prefix=prefix, name=name, tags=tags, options=options)
    def group(self, prefix: str = '', *,
        routes: Optional[List] = None,
        name: str = '',
        tags: Optional[List[str]] = None,
        autoprefix: bool = True,
        middleware: Optional[List] = None,
        auth: Optional[Guard] = None,
        scopes: Optional[List] = None,
    ) -> Callable[[Decorator], Decorator]:
        """Route groups method and decorator.

        Re-registers the given (or decorated) routes under an extra
        prefix/name, merging group-level middleware into each route.
        """
        # Convert auth helper param to middleware
        if middleware is None: middleware = []
        if auth: middleware.append(auth)
        # Convert scopes to Guard route middleware
        if scopes:
            middleware.append(Guard(scopes))
        # Get name
        if not name: name = prefix
        # Clean name
        if name:
            name = name.replace('/', '.')
            if name[-1] == '.': name = name[0:-1]  # Remove trailing .
            if name[0] == '.': name = name[1:]  # Remove beginning .
        def handle(routes):
            # Controllers return multiple routes as a List, so flatten everything into one List
            all_routes = []
            for route in routes:
                if type(route) == list:
                    all_routes.extend(route)
                else:
                    all_routes.append(route)
            # New routes with updated prefixes
            new_routes = []
            # Loop group routes and update prefixes
            for route in all_routes:
                # Remove old route from self.routes
                if route.name in self.routes:
                    self.routes.pop(route.name)
                # Strip global prefix if exists
                path = route.path
                if len(path) >= len(self.prefix) and path[0:len(self.prefix)] == self.prefix:
                    path = path[len(self.prefix):]
                rname = route.name
                if len(rname) >= len(self.name) and rname[0:len(self.name)] == self.name:
                    rname = rname[len(self.name) + 1:]
                full_path = prefix + path
                full_name = name + '.' + rname
                # Get route middleware based on parent child overrides
                (route_middleware, route.endpoint) = self._merge_route_middleware(middleware, route.middleware, route.endpoint)
                #dump(route, middleware, route.middleware)
                # Add new route with new group prefix and name
                # Because this is a polymorphic router (works for Web and API router)
                # The self.add methods will be different. The actual route should mimic the self.add
                # parameters, so modify the route with calculated values and pass in as
                # self.add **kwargs
                if tags:
                    if route.tags is None: route.tags = []
                    route.tags.extend(tags)
                route.path = full_path
                route.name = full_name
                # Override group level autoprefix only if False to disable all, else use what the inside route used
                if autoprefix == False: route.autoprefix = autoprefix
                route.middleware = route_middleware
                del route.original_path
                del route.original_name
                # NO, this didn't handle polymorphic router. self.add is different for each router.
                # new_route = self.add(
                #     path=full_path,
                #     endpoint=route.endpoint,
                #     methods=route.methods,
                #     name=full_name,
                #     autoprefix=autoprefix,
                #     #middleware=route.middleware or middleware # Closest one to the route wins
                #     middleware=route_middleware
                # )
                new_route = self.add(**route)
                new_routes.append(new_route)
            # Return new routes for recursive nested groups
            return new_routes
        # Method access
        if routes: return handle(routes)
        # Decorator access
        def decorator(func: Decorator) -> Decorator:
            # Backup and clear existing routes
            all_routes = self._routes.clone()
            self._routes = Dict()
            # Get routes from the group method
            func()
            # Build routes list from group method
            routes = []
            for route in self._routes.values():
                routes.append(route)
            # Restore all routes besides the ones in the group method
            self._routes = all_routes.clone()
            # Add these routes to the proper group
            handle(routes)
            return func
        return decorator
    def _merge_route_middleware(self, parent_middleware: List, child_middleware: List, endpoint: Callable) -> Tuple[List, Callable]:
        """Merge parent route middleware into child, children parameters win in merge.
        If endpoint method is middleware, merge that also and return a new partial endpoint.

        NOTE(review): `Tuple` is not among the names imported from uvicore.typing
        in this file; harmless at runtime only because `from __future__ import
        annotations` makes annotations lazy — confirm before evaluating them.
        """
        # Merge helper
        def merge(parent_middleware, child_middleware):
            # Get middleware __init__ params
            inspection = inspect.signature(parent_middleware.__init__)
            params = [x for x in inspection.parameters]
            # Build params key value Dict
            parent_params = Dict()
            child_params = Dict()
            for param in params:
                # If param value is None do NOT add to kwargs or even the None will win in a merge
                if getattr(parent_middleware, param) is not None:
                    parent_params[param] = getattr(parent_middleware, param)
                if getattr(child_middleware, param) is not None:
                    child_params[param] = getattr(child_middleware, param)
            # Deep merge params
            kwargs = deep_merge(child_params, parent_params, merge_lists=True)
            # Instantiate new middleware with new params
            # We can't just set child_middleware new dict values because it has already fired up
            # its super.__init__, so changing values now does nothing. Instead we replace it with an all
            # new instantiated Guard
            new_middleware = parent_middleware.__class__(**kwargs)
            return new_middleware
        # New merged middleware
        middlewares = []
        # Both parent and child have middleware. If both have the same middleware,
        # create a new middleware based on a merge of parameters where CHILD params WIN.
        if parent_middleware and child_middleware:
            # NOTE(review): the loop variables deliberately shadow the parameters
            # of the same name; correct only because of the break below — fragile.
            for child_middleware in child_middleware:
                found = False
                for parent_middleware in parent_middleware:
                    if str(parent_middleware) == str(child_middleware):
                        found = True
                        break
                if not found:
                    # No matching parent middleware, use child
                    middlewares.append(child_middleware)
                else:
                    # Found matching parent and child middleware. Create new middleware with merged parameters of the two
                    # Because of the break statement, the current parent_middleware variable is the match
                    middlewares.append(merge(parent_middleware, child_middleware))
        elif parent_middleware:
            # We have parent middleware, no children
            middlewares = copy(parent_middleware)
        elif child_middleware:
            # We have child middleware, no parent
            middlewares = copy(child_middleware)
        # Check each endpoint's params and look at its default value
        # Compare that default value to our list of middleware
        # If they are the same, REMOVE the middleware from our list
        # or we will trigger the same middleware twice. Once in a group/controller
        # and once on the actual function parameter (with FastAPI Depends) as well.
        final_middleware = []
        if middlewares:
            # Get endpoint method signature and parameters
            endpoint_signature = inspect.signature(endpoint)
            endpoint_params = endpoint_signature.parameters
            # Loop all merged parent/child middleware
            for middleware in middlewares:
                add = True
                # Loop each endpoint parameter
                for endpoint_param in endpoint_params.values():
                    # Endpoint middleware will be the default value of a parameter
                    endpoint_middleware = endpoint_param.default
                    # Compare the endpoint default value with the current middleware
                    # If they are the same, this endpoint parameter is middleware and matches
                    # one of our previously defined merged middlewares.
                    # If we find a match do NOT add current middleware to final_middleware to eliminate
                    # duplicate middleware being run twice.
                    if str(middleware) == str(endpoint_middleware):
                        # Do NOT add this middleware to final_middleware
                        add = False
                        # Merge endpoint middleware to higher level matched middleware
                        # This will merge scopes and other parameters to provide scope hierarchy
                        new_endpoint_middleware = merge(middleware, endpoint_middleware)
                        # Create a partial endpoint method with our new middleware parameter injected
                        # Partials see https://levelup.gitconnected.com/changing-pythons-original-behavior-8a43b7d1c55d
                        endpoint = partial(endpoint, **{
                            endpoint_param.name: new_endpoint_middleware
                        })
                        # There will not be another endpoint param matching this one middleware, break
                        break
                if add:
                    # No endpoint middleware found, add current middleware
                    final_middleware.append(middleware)
        # Return new merged middleware and modified partial endpoint
        return (final_middleware, endpoint)
    def _merge_route_middlewareOLD(self, parent_middleware: List, child_middleware: List) -> List:
        """Merge parent route middleware into child, children parameters win in merge.

        NOTE(review): superseded by _merge_route_middleware above and not
        called within this class — candidate for removal.
        """
        # Parent has no middleware to merge into child
        if not parent_middleware: return child_middleware
        # if not child_middleware:
        #     # Child has no middleware of its own, add parent level middleware to this route
        #     return copy(parent_middleware)
        # Merge helper
        def merge(parent_middleware, child_middleware):
            # Get middleware __init__ params
            inspection = inspect.signature(parent_middleware.__init__)
            params = [x for x in inspection.parameters]
            # Build params key value Dict
            parent_params = Dict()
            child_params = Dict()
            for param in params:
                # If param value is None do NOT add to kwargs or even the None will win in a merge
                if getattr(parent_middleware, param) is not None:
                    parent_params[param] = getattr(parent_middleware, param)
                if getattr(child_middleware, param) is not None:
                    child_params[param] = getattr(child_middleware, param)
            # Deep merge params
            kwargs = deep_merge(child_params, parent_params, merge_lists=True)
            # Instantiate new middleware with new params
            # We can't just set child_middleware new dict values because it has already fired up
            # its super.__init__, so changing values now does nothing. Instead we replace it with an all
            # new instantiated Guard
            new_middleware = parent_middleware.__class__(**kwargs)
            return new_middleware
        # Both parent and child have middleware. If both have the same middleware,
        # create a new middleware based on a merge of parameters where CHILD params WIN.
        middlewares = []
        if child_middleware:
            for child_middleware in child_middleware:
                found = False
                for parent_middleware in parent_middleware:
                    if str(parent_middleware) == str(child_middleware):
                        found = True
                        break
                if not found:
                    # No matching controller middleware found
                    middlewares.append(child_middleware)
                else:
                    # Found matching controller and route middleware. Create new middleware with merged parameters of the two
                    # Because of the break, the current parent_middleware variable is the match
                    middlewares.append(merge(parent_middleware, child_middleware))
        else:
            middlewares = copy(parent_middleware)
        return middlewares
    def _format_path_name(self, path: str, name: str, autoprefix: bool, methods: List):
        """Normalize a route's path and name and apply the router prefixes.

        Returns (path, full_path, name, full_name).
        """
        # Clean path
        if path and path[-1] == '/': path = path[0:-1]  # Remove trailing /
        if path and path[0] != '/': path = '/' + path  # Add beginning /
        # Get name
        no_name = False
        if not name:
            # We have no explicit name, derive name from path including params or its not unique
            no_name = True
            name = path.replace('{', '').replace('}', '')
        # Clean name
        name = name.replace('/', '.')
        if name and name[-1] == '.': name = name[0:-1]  # Remove trailing .
        if name and name[0] == '.': name = name[1:]  # Remove beginning .
        if not name: name = 'root'
        # Autoprefix path and name
        # Note that route "name" is for URL linking
        # This does NOT use the global URL prefix because that can change by the user running the package
        # Instead we use the actual package name for the name prefix
        full_path = path
        full_name = name
        if autoprefix:
            full_path = self.prefix + full_path
            full_name = self.name + '.' + full_name
        if not full_path: full_path = '/'
        # To prevent duplicates of same route for GET, POST, PUT, PATCH, DELETE
        # suffix the name with -POST, -PUT... But do not suffix for GET
        # If this is a multi method function, only add POST-PUT combination if no name is forced
        methods = methods.copy()
        if 'GET' in methods: methods.remove('GET')
        if methods:
            if no_name or (no_name == False and len(methods) == 1):
                methods = sorted(methods)
                method_suffix = '-' + '-'.join(methods)
                if full_name[-len(method_suffix):] != method_suffix: full_name = full_name + method_suffix
        # Return newly formatted values
        return (path, full_path, name, full_name)
@uvicore.service()
class Routes(RoutesInterface):
    """Routes and Controller base class.

    Subclasses register their routes via register(router) and may declare
    class-level `middleware`, `auth` and `scopes` attributes which
    _middleware() collects for every route on the controller.
    """
    # Class-level defaults restored (they were commented out) so that
    # _middleware() does not raise AttributeError on subclasses that do not
    # declare these attributes. Subclasses that do declare them override these.
    middleware = None
    auth = None
    scopes = None
    @property
    def package(self) -> PackageInterface:
        # The package that owns this controller.
        return self._package
    def __init__(self, package: PackageInterface):
        self._package = package
    def _middleware(self):
        """Collect class-level auth/middleware/scopes into one middleware list.

        `auth` is appended as-is, `middleware` entries are extended in order,
        and `scopes` is wrapped in a Guard — mirroring Router.group().
        """
        middlewares = []
        if self.auth: middlewares.append(self.auth)
        if self.middleware: middlewares.extend(self.middleware)
        if self.scopes: middlewares.append(Guard(self.scopes))
        return middlewares
|
import json
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import relationship
db = SQLAlchemy()

# Association (join) table backing the many-to-many relationship between
# users and groups: one row per (user, group) membership.
association_table = db.Table(
    'usergroups', db.metadata,
    db.Column('uid', db.Integer, db.ForeignKey('users.id')),
    db.Column('gid', db.Integer, db.ForeignKey('groups.id'))
)
class User(db.Model):
    """Model for the ``users`` table, linked to groups via ``usergroups``."""
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    # Many-to-many: users <-> groups through the usergroups association table.
    group_memberships = relationship(
        'Group', secondary=association_table, back_populates='member_users')
    first_name = db.Column(db.String(255))
    last_name = db.Column(db.String(255))
    userid = db.Column(db.String(32))
    date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
    date_modified = db.Column(
        db.DateTime, default=db.func.current_timestamp(),
        onupdate=db.func.current_timestamp()
    )

    def __init__(self, userid):
        """Initialize with userid."""
        self.userid = userid

    def save(self):
        """Persist this user (insert or update) and commit."""
        db.session.add(self)
        db.session.commit()

    @staticmethod
    def get_all():
        """Return all users."""
        return User.query.all()

    def delete(self):
        """Delete this user and commit."""
        db.session.delete(self)
        db.session.commit()

    def __repr__(self):
        return '<User: {}>'.format(self.userid)

    def as_dict(self):
        """Return a plain-dict view of this user; groups appear as groupids."""
        return {
            'first_name': self.first_name,
            'last_name': self.last_name,
            'userid': self.userid,
            'groups': [
                group.as_dict()['groupid'] for group in self.group_memberships
            ],
            'date_created': self.date_created,
            'date_modified': self.date_modified
        }

    def json(self):
        """Return a JSON string for this user.

        BUG FIX: ``date_created``/``date_modified`` are datetimes, which
        json.dumps cannot serialize natively (it raised TypeError);
        serialize non-JSON types via str().
        """
        return json.dumps(self.as_dict(), default=str)
class Group(db.Model):
    """Model for the ``groups`` table, linked to users via ``usergroups``."""
    __tablename__ = 'groups'

    id = db.Column(db.Integer, primary_key=True)
    # Many-to-many: groups <-> users through the usergroups association table.
    member_users = relationship(
        'User', secondary=association_table,
        back_populates='group_memberships')
    groupid = db.Column(db.String(255))
    date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
    date_modified = db.Column(
        db.DateTime, default=db.func.current_timestamp(),
        onupdate=db.func.current_timestamp()
    )

    def __init__(self, groupid):
        """Initialize with groupid."""
        self.groupid = groupid

    def save(self):
        """Persist this group (insert or update) and commit."""
        db.session.add(self)
        db.session.commit()

    @staticmethod
    def get_all():
        """Return all groups."""
        return Group.query.all()

    def delete(self):
        """Delete this group and commit."""
        db.session.delete(self)
        db.session.commit()

    def __repr__(self):
        return '<Group: {}>'.format(self.groupid)

    def as_dict(self):
        """Return a plain-dict view: groupid plus the member users as dicts."""
        return {
            'groupid': self.groupid,
            'users': [user.as_dict() for user in self.member_users],
        }

    def json(self):
        """Return a JSON string for this group.

        BUG FIX: member-user dicts contain datetime values json.dumps
        cannot serialize natively (it raised TypeError); serialize
        non-JSON types via str().
        """
        return json.dumps(self.as_dict(), default=str)
|
# coding=utf-8
"""
Inception-v3, model from the paper:
"Rethinking the Inception Architecture for Computer Vision"
http://arxiv.org/abs/1512.00567
Original source:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/imagenet/classify_image.py
License: http://www.apache.org/licenses/LICENSE-2.0
Download pretrained weights from:
https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/inception_v3.pkl
"""
import lasagne
import theano
import theano.tensor as t
from lasagne.layers import ConcatLayer, Conv2DLayer, DenseLayer, GlobalPoolLayer
from lasagne.layers import InputLayer, Pool2DLayer
from lasagne.layers.normalization import batch_norm
from lasagne.nonlinearities import softmax
def preprocess(im):
    """Convert an RGB uint8 image to the network's input format.

    Resizes to 299x299, scales pixel values to [-1, 1], and rearranges
    the axes into bc01 layout.

    Parameters
    ----------
    im :
        RGB uint8 image array.

    Returns
    -------
    float32 array of shape (1, 3, 299, 299)
    """
    import skimage.transform
    import numpy as np

    resized = skimage.transform.resize(im, (299, 299), preserve_range=True)
    scaled = (resized - 128) / 128  # uint8 range -> [-1, 1]
    return np.rollaxis(np.array(scaled), 2)[np.newaxis].astype('float32')
def bn_conv(input_layer, **kwargs):
    """Convolution followed by batch normalization.

    Parameters
    ----------
    input_layer :
        Layer to convolve.
    kwargs :
        Forwarded to Conv2DLayer (num_filters, filter_size, stride, pad, ...).

    Returns
    -------
    The batch-normalized convolution layer.
    """
    conv = Conv2DLayer(input_layer, **kwargs)
    return batch_norm(conv, epsilon=0.001)
def inception_a(input_layer, nfilt):
    """Inception-A block (modified version of figure 5 in the paper).

    Parameters
    ----------
    input_layer :
        Layer the block branches from.
    nfilt :
        Nested tuple of per-branch filter counts.

    Returns
    -------
    ConcatLayer joining the four branch outputs.
    """
    # Branch 1: plain 1x1 convolution.
    b1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
    # Branch 2: 1x1 then 5x5 convolution.
    b2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    b2 = bn_conv(b2, num_filters=nfilt[1][1], filter_size=5, pad=2)
    # Branch 3: 1x1 then two 3x3 convolutions.
    b3 = bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
    b3 = bn_conv(b3, num_filters=nfilt[2][1], filter_size=3, pad=1)
    b3 = bn_conv(b3, num_filters=nfilt[2][2], filter_size=3, pad=1)
    # Branch 4: average pooling then 1x1 convolution.
    b4 = Pool2DLayer(
        input_layer, pool_size=3, stride=1, pad=1, mode='average_exc_pad')
    b4 = bn_conv(b4, num_filters=nfilt[3][0], filter_size=1)
    return ConcatLayer([b1, b2, b3, b4])
def inception_b(input_layer, nfilt):
    """Reduction block (modified version of figure 10): stride-2 branches.

    Parameters
    ----------
    input_layer :
        Layer the block branches from.
    nfilt :
        Nested tuple of per-branch filter counts.

    Returns
    -------
    ConcatLayer joining the three branch outputs.
    """
    # Branch 1: strided 3x3 convolution.
    b1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=3, stride=2)
    # Branch 2: 1x1, 3x3, then strided 3x3 convolution.
    b2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    b2 = bn_conv(b2, num_filters=nfilt[1][1], filter_size=3, pad=1)
    b2 = bn_conv(b2, num_filters=nfilt[1][2], filter_size=3, stride=2)
    # Branch 3: strided 3x3 pooling (layer default mode).
    b3 = Pool2DLayer(input_layer, pool_size=3, stride=2)
    return ConcatLayer([b1, b2, b3])
def inception_c(input_layer, nfilt):
    """Inception block with factorized 7x7 convolutions (figure 6 in the paper).

    Parameters
    ----------
    input_layer :
        Layer the block branches from.
    nfilt :
        Nested tuple of per-branch filter counts.

    Returns
    -------
    ConcatLayer joining the four branch outputs.
    """
    # Branch 1: plain 1x1 convolution.
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
    # Branch 2: 1x1, then 7x7 factorized as 1x7 followed by 7x1.
    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
    l2 = bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))
    # Branch 3: 1x1, then two factorized 7x7 convolutions.
    l3 = bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
    l3 = bn_conv(l3, num_filters=nfilt[2][1], filter_size=(7, 1), pad=(3, 0))
    l3 = bn_conv(l3, num_filters=nfilt[2][2], filter_size=(1, 7), pad=(0, 3))
    l3 = bn_conv(l3, num_filters=nfilt[2][3], filter_size=(7, 1), pad=(3, 0))
    l3 = bn_conv(l3, num_filters=nfilt[2][4], filter_size=(1, 7), pad=(0, 3))
    # Branch 4: average pooling, then 1x1 convolution.
    l4 = Pool2DLayer(
        input_layer, pool_size=3, stride=1, pad=1, mode='average_exc_pad')
    l4 = bn_conv(l4, num_filters=nfilt[3][0], filter_size=1)
    return ConcatLayer([l1, l2, l3, l4])
def inception_d(input_layer, nfilt):
    """Reduction block (modified version of figure 10): stride-2 branches.

    Parameters
    ----------
    input_layer :
        Layer the block branches from.
    nfilt :
        Nested tuple of per-branch filter counts.

    Returns
    -------
    ConcatLayer joining the three branch outputs.
    """
    # Branch 1: 1x1 then strided 3x3 convolution.
    b1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
    b1 = bn_conv(b1, num_filters=nfilt[0][1], filter_size=3, stride=2)
    # Branch 2: 1x1, factorized 7x7 (1x7 + 7x1), then strided 3x3.
    b2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    b2 = bn_conv(b2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
    b2 = bn_conv(b2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))
    b2 = bn_conv(b2, num_filters=nfilt[1][3], filter_size=3, stride=2)
    # Branch 3: strided 3x3 pooling (layer default mode).
    b3 = Pool2DLayer(input_layer, pool_size=3, stride=2)
    return ConcatLayer([b1, b2, b3])
def inception_e(input_layer, nfilt, pool_mode):
    """Expanded-filter-bank block (figure 7 in the paper).

    Parameters
    ----------
    input_layer :
        Layer the block branches from.
    nfilt :
        Nested tuple of per-branch filter counts.
    pool_mode :
        Pooling mode for the fourth branch (e.g. 'average_exc_pad' or 'max').

    Returns
    -------
    ConcatLayer joining the six branch outputs.
    """
    # Branch 1: plain 1x1 convolution.
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
    # Branch 2: 1x1, then a parallel 1x3 / 3x1 split.
    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2a = bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 3), pad=(0, 1))
    l2b = bn_conv(l2, num_filters=nfilt[1][2], filter_size=(3, 1), pad=(1, 0))
    # Branch 3: 1x1, 3x3, then a parallel 1x3 / 3x1 split.
    l3 = bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
    l3 = bn_conv(l3, num_filters=nfilt[2][1], filter_size=3, pad=1)
    l3a = bn_conv(l3, num_filters=nfilt[2][2], filter_size=(1, 3), pad=(0, 1))
    l3b = bn_conv(l3, num_filters=nfilt[2][3], filter_size=(3, 1), pad=(1, 0))
    # Branch 4: pooling (mode chosen by caller), then 1x1 convolution.
    l4 = Pool2DLayer(
        input_layer, pool_size=3, stride=1, pad=1, mode=pool_mode)
    l4 = bn_conv(l4, num_filters=nfilt[3][0], filter_size=1)
    return ConcatLayer([l1, l2a, l2b, l3a, l3b, l4])
def build_network():
    """Assemble the Inception-v3 graph and a compiled validation function.

    Returns
    -------
    dict with:
        'model'  : the final softmax DenseLayer (network output layer)
        'val_fn' : theano function(inputs, targets) -> [loss, error rate]
    """
    input_var = t.tensor4('inputs')
    target = t.matrix('targets')
    # Stem: plain conv/pool pipeline from the 299x299 input.
    net = {'input': InputLayer((None, 3, 299, 299), input_var=input_var)}
    net['conv'] = bn_conv(net['input'],
                          num_filters=32, filter_size=3, stride=2)
    net['conv_1'] = bn_conv(net['conv'], num_filters=32, filter_size=3)
    net['conv_2'] = bn_conv(net['conv_1'],
                            num_filters=64, filter_size=3, pad=1)
    net['pool'] = Pool2DLayer(net['conv_2'], pool_size=3, stride=2, mode='max')
    net['conv_3'] = bn_conv(net['pool'], num_filters=80, filter_size=1)
    net['conv_4'] = bn_conv(net['conv_3'], num_filters=192, filter_size=3)
    net['pool_1'] = Pool2DLayer(net['conv_4'],
                                pool_size=3, stride=2, mode='max')
    # Three Inception-A blocks.
    net['mixed/join'] = inception_a(
        net['pool_1'], nfilt=((64,), (48, 64), (64, 96, 96), (32,)))
    net['mixed_1/join'] = inception_a(
        net['mixed/join'], nfilt=((64,), (48, 64), (64, 96, 96), (64,)))
    net['mixed_2/join'] = inception_a(
        net['mixed_1/join'], nfilt=((64,), (48, 64), (64, 96, 96), (64,)))
    # Grid reduction.
    net['mixed_3/join'] = inception_b(
        net['mixed_2/join'], nfilt=((384,), (64, 96, 96)))
    # Four Inception-C blocks with factorized 7x7 convolutions.
    net['mixed_4/join'] = inception_c(
        net['mixed_3/join'],
        nfilt=((192,), (128, 128, 192), (128, 128, 128, 128, 192), (192,)))
    net['mixed_5/join'] = inception_c(
        net['mixed_4/join'],
        nfilt=((192,), (160, 160, 192), (160, 160, 160, 160, 192), (192,)))
    net['mixed_6/join'] = inception_c(
        net['mixed_5/join'],
        nfilt=((192,), (160, 160, 192), (160, 160, 160, 160, 192), (192,)))
    net['mixed_7/join'] = inception_c(
        net['mixed_6/join'],
        nfilt=((192,), (192, 192, 192), (192, 192, 192, 192, 192), (192,)))
    # Second grid reduction.
    net['mixed_8/join'] = inception_d(
        net['mixed_7/join'],
        nfilt=((192, 320), (192, 192, 192, 192)))
    # Two Inception-E blocks with expanded filter banks.
    net['mixed_9/join'] = inception_e(
        net['mixed_8/join'],
        nfilt=((320,), (384, 384, 384), (448, 384, 384, 384), (192,)),
        pool_mode='average_exc_pad')
    net['mixed_10/join'] = inception_e(
        net['mixed_9/join'],
        nfilt=((320,), (384, 384, 384), (448, 384, 384, 384), (192,)),
        pool_mode='max')
    # Head: global pooling into a 1008-way softmax classifier.
    net['pool3'] = GlobalPoolLayer(net['mixed_10/join'])
    net['softmax'] = DenseLayer(
        net['pool3'], num_units=1008, nonlinearity=softmax)
    # Deterministic forward pass (no dropout/noise) for validation.
    test_output = lasagne.layers.get_output(net['softmax'], deterministic=True)
    # Squared-hinge-style loss on the target matrix.
    test_loss = t.mean(t.sqr(t.maximum(0., 1. - target * test_output)))
    # Classification error rate: fraction of argmax mismatches.
    test_err = t.mean(t.neq(t.argmax(test_output, axis=1),
                            t.argmax(target, axis=1)),
                      dtype=theano.config.floatX)
    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target], [test_loss, test_err])
    return {'model': net['softmax'], 'val_fn': val_fn}
|
# user defined class that makes use of the sub classes of the CalculatorError class
# CalculatorError class has the OperandError and OperatorError extending it
# Calculator class uses these two subclasses to check for errors (operand and operator errors)
from operanderror import OperandError
from operatorerror import OperatorError
class Calculator:
    """A binary calculator that validates its operands and operator through
    OperandError/OperatorError before computing a result."""

    def __init__(self, operand_1, operator, operand_2):
        self.operand_1 = operand_1
        self.operator = operator
        self.operand_2 = operand_2

    def evaluate(self):
        """Return True if there's no error, else False.

        All three checks run (each may report its own error) before the
        results are combined.
        """
        opd_1_ok = OperandError(self.operand_1).raise_OperandError()
        opt_ok = OperatorError(self.operator).raise_OperatorError()
        opd_2_ok = OperandError(self.operand_2).raise_OperandError()
        return bool(opd_1_ok and opt_ok and opd_2_ok)

    def calculate(self):
        """Return the computed value of a calculation, or None when the
        inputs are invalid, the operator is unknown, or a division by
        zero is detected."""
        if not self.evaluate():
            print("we can not do any calculation, fix the errors before")
            return None
        if self.operator == '+':
            return self.add()
        elif self.operator == '-':
            return self.difference()
        elif self.operator == '*':
            return self.product()
        elif self.operator == '**':
            return self.pow()
        elif self.operator == '/' and OperandError(self.operand_2).raise_OperandZeroError():
            return self.div()
        elif self.operator == '//' and OperandError(self.operand_2).raise_OperandZeroError():
            return self.floor_div()
        elif self.operator == '%' and OperandError(self.operand_2).raise_OperandZeroError():
            return self.mod()
        # BUG FIX: the original ended with a bare `exit` expression -- a no-op
        # that *looked* like it terminated the program. Make the fall-through
        # explicit instead.
        return None

    def add(self):
        """Return the sum of the operands."""
        return self.operand_1 + self.operand_2

    def difference(self):
        """Return the difference between the operands."""
        return self.operand_1 - self.operand_2

    def product(self):
        """Return the product of the operands."""
        return self.operand_1 * self.operand_2

    def pow(self):
        """Return the first operand raised to the power of the second."""
        return self.operand_1 ** self.operand_2

    def mod(self):
        """Return the remainder of the first operand divided by the second."""
        return self.operand_1 % self.operand_2

    def div(self):
        """Return the ratio of the first operand divided by the second."""
        return self.operand_1 / self.operand_2

    def floor_div(self):
        """Return the quotient of the first operand divided by the second."""
        return self.operand_1 // self.operand_2
|
# -*- coding: utf-8 -*-
'''
Follows the analyzer faithfully.
'''
|
# -*- coding: utf-8 -*-
"""
:Module: freshpy.utils.version
:Synopsis: This simple script contains the package version
:Created By: Jeff Shurtliff
:Last Modified: Jeff Shurtliff
:Modified Date: 04 Jan 2022
"""
from . import log_utils
# Initialize logging
logger = log_utils.initialize_logging(__name__)
# Define special and global variables
__version__ = "1.1.0"
def get_full_version():
"""This function returns the current full version of the ``freshpy`` package.
.. versionadded:: 1.0.0
"""
return __version__
def get_major_minor_version():
    """Return the current major.minor (i.e. X.Y) version of the ``freshpy`` package."""
    version_parts = __version__.split(".")
    return ".".join(version_parts[:2])
|
from django.db import models
# Create your models here.
class Autor(models.Model):
    """Author of articles: name, address and web/contact details."""
    nome = models.CharField(max_length=255)
    endereco = models.CharField(max_length=255)
    site = models.URLField(blank=True, null=True)
    email = models.EmailField()

    def __str__(self):
        # Displayed as "name - email".
        return f'{self.nome} - {self.email}'
class Artigo(models.Model):
    """Article written by an Autor, with creation/update timestamps."""
    autor = models.ForeignKey(Autor, on_delete=models.CASCADE)
    titulo = models.CharField(max_length=200)
    # BUG FIX: auto_now and auto_now_add were swapped. The publication date
    # must be set once at creation (auto_now_add), while the update
    # timestamp must refresh on every save (auto_now).
    publicado_em = models.DateField(auto_now_add=True)
    atualizado_em = models.DateField(auto_now=True)
    texto = models.TextField()

    def __str__(self):
        return self.titulo
|
class TestCase(object):
    """A partial test case built from the values of several test factors."""

    def __init__(self, parameters):
        """
        :param parameters: A list containing TestFactor objects
        """
        self.components = parameters
        self.tc_len = len(parameters)
        # One slot per factor; None marks a factor not yet covered.
        self.tc_array = [None] * self.tc_len
        # Map each possible value back to the index of the factor it
        # belongs to (assumes values are unique across factors).
        self.val_to_index_map = dict()
        for index, component in enumerate(self.components):
            for value in component.get_values_list():
                self.val_to_index_map[value] = index

    def __contains__(self, item):
        return item in self.tc_array

    def get_uncovered_parameters_set(self):
        """
        :return: a frozenset of the TestFactor objects that are still
                 uncovered in this test case
        """
        return frozenset(
            component
            for component, value in zip(self.components, self.tc_array)
            if value is None
        )

    def add_slot(self, slot):
        """
        Adds the slot contents (values) to this test case.

        :param slot: iterable of values, each belonging to a known factor
        :return: void
        """
        for val in slot:
            # NOTE(review): an already-assigned slot is silently overwritten;
            # the original carried a TODO to validate this.
            self.tc_array[self.val_to_index_map[val]] = val

    def get_values_list(self):
        """Return the assigned values in factor order (covered slots only)."""
        return [val for val in self.tc_array if val is not None]

    def __repr__(self):
        # BUG FIX: the original body was `pass`, so __repr__ returned None
        # and repr() raised TypeError. Show the slot contents instead.
        return '{}({!r})'.format(type(self).__name__, self.tc_array)
|
'''
- Leetcode problem: 39
- Difficulty: Medium
- Brief problem description:
Given a set of candidate numbers (candidates) (without duplicates) and a target number (target), find all unique
combinations in candidates where the candidate numbers sum to target.
The same repeated number may be chosen from candidates unlimited number of times.
Note:
All numbers (including target) will be positive integers.
The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [2,3,6,7], target = 7,
A solution set is:
[
[7],
[2,2,3]
]
Example 2:
Input: candidates = [2,3,5], target = 8,
A solution set is:
[
[2,2,2,2],
[2,3,3],
[3,5]
]
- Solution Summary:
- Used Resources:
--- Bo Zhou
'''
class Solution:
    def combinationSum(self, candidates: 'List[int]', target: int) -> 'List[List[int]]':
        """Return all unique combinations of candidates summing to target.

        Each candidate may be reused an unlimited number of times.

        BUG FIX: the annotations are quoted because this file never imports
        ``typing.List``; unquoted they raised NameError when the method was
        defined.
        """
        candidates.sort()
        result = []
        self.calSum(candidates, [], 0, target, 0, result)
        return result

    def calSum(self, candidates, combined, comSum, target, curIndex, result):
        """DFS helper: extend ``combined`` (current sum ``comSum``) with
        candidates from ``curIndex`` onward, appending exact matches to
        ``result``."""
        if comSum == target:
            result.append(combined[:])
            return
        if comSum > target:
            return
        for i in range(curIndex, len(candidates)):
            combined.append(candidates[i])
            curSum = comSum + candidates[i]
            # Recurse with index i (not i+1): reuse of candidates[i] is allowed.
            self.calSum(candidates, combined, curSum, target, i, result)
            combined.pop()
            # Candidates are sorted, so every later candidate overshoots too.
            if curSum >= target:
                break
|
# Copyright 2004-present, Facebook. All Rights Reserved.
from django import forms
class CreateProductForm(forms.Form):
    """Form for creating a product; every field is required."""

    # Shared widget CSS classes, kept in one place instead of being
    # repeated in every field definition (the string was duplicated 8x).
    _WIDGET_CLASS = "mx-4 p-2 border rounded border-gray-300 text-gray-400"

    title = forms.CharField(
        label="Product Title",
        max_length=255,
        widget=forms.TextInput(attrs={"class": _WIDGET_CLASS}),
    )
    description = forms.CharField(
        label="Description",
        max_length=2048,
        widget=forms.Textarea(
            attrs={"class": _WIDGET_CLASS, "rows": 5, "cols": 20}
        ),
    )
    amount = forms.DecimalField(
        label="Amount",
        max_value=500000,
        min_value=0,
        decimal_places=2,
        max_digits=8,
        widget=forms.NumberInput(
            attrs={"class": _WIDGET_CLASS, "step": "0.01"}
        ),
    )
    brand = forms.CharField(
        label="Brand",
        max_length=100,
        widget=forms.TextInput(attrs={"class": _WIDGET_CLASS}),
    )
    inventory = forms.IntegerField(
        label="Inventory",
        widget=forms.NumberInput(attrs={"class": _WIDGET_CLASS}),
    )
    link = forms.CharField(
        label="Product URL",
        max_length=1024,
        widget=forms.TextInput(attrs={"class": _WIDGET_CLASS}),
    )
    image_link = forms.CharField(
        label="Product Image URL",
        max_length=1024,
        widget=forms.TextInput(attrs={"class": _WIDGET_CLASS}),
    )
class UpdateProductForm(forms.Form):
    """Form for updating a product; every field is optional (partial update)."""

    # Shared widget CSS classes, kept in one place instead of being
    # repeated in every field definition (the string was duplicated 8x).
    _WIDGET_CLASS = "mx-4 p-2 border rounded border-gray-300 text-gray-400"

    title = forms.CharField(
        required=False,
        label="Product Title",
        max_length=255,
        widget=forms.TextInput(attrs={"class": _WIDGET_CLASS}),
    )
    description = forms.CharField(
        required=False,
        label="Description",
        max_length=2048,
        widget=forms.Textarea(
            attrs={"class": _WIDGET_CLASS, "rows": 5, "cols": 20}
        ),
    )
    amount = forms.DecimalField(
        required=False,
        label="Amount",
        max_value=500000,
        min_value=0,
        decimal_places=2,
        max_digits=8,
        widget=forms.NumberInput(
            attrs={"class": _WIDGET_CLASS, "step": "0.01"}
        ),
    )
    brand = forms.CharField(
        required=False,
        label="Brand",
        max_length=100,
        widget=forms.TextInput(attrs={"class": _WIDGET_CLASS}),
    )
    inventory = forms.IntegerField(
        required=False,
        label="Inventory",
        widget=forms.NumberInput(attrs={"class": _WIDGET_CLASS}),
    )
    link = forms.CharField(
        required=False,
        label="Product URL",
        max_length=1024,
        widget=forms.TextInput(attrs={"class": _WIDGET_CLASS}),
    )
    image_link = forms.CharField(
        required=False,
        label="Product Image URL",
        max_length=1024,
        widget=forms.TextInput(attrs={"class": _WIDGET_CLASS}),
    )
|
from functools import partial
from uuid import uuid1, uuid4, UUID
from annotator import annotation, document
from annotator.auth import DEFAULT_TTL
from horus.models import (
get_session,
BaseModel,
ActivationMixin,
GroupMixin,
UserMixin,
UserGroupMixin,
)
import transaction
from pyramid_basemodel import Base, Session
from pyramid.i18n import TranslationStringFactory
_ = TranslationStringFactory(__package__)
from sqlalchemy import func, or_
from sqlalchemy.dialects import postgresql as pg
from sqlalchemy.schema import Column
from sqlalchemy.types import Integer, TypeDecorator, CHAR, VARCHAR
from sqlalchemy.ext.declarative import declared_attr
import sqlalchemy as sa
try:
import simplejson as json
except ImportError:
import json
from h import interfaces, lib
class JSONEncodedDict(TypeDecorator):
    """Represents an immutable structure as a json-encoded string.

    Usage::

        JSONEncodedDict(255)
    """
    impl = VARCHAR

    def process_bind_param(self, value, dialect):
        # Serialize to JSON text on the way into the database.
        return None if value is None else json.dumps(value)

    def process_result_value(self, value, dialect):
        # Parse the JSON text back into Python structures on the way out.
        return None if value is None else json.loads(value)
class GUID(TypeDecorator):
    """Platform-independent GUID type.

    From http://docs.sqlalchemy.org/en/latest/core/types.html
    Copyright (C) 2005-2011 the SQLAlchemy authors and contributors

    Uses Postgresql's UUID type, otherwise uses
    CHAR(32), storing as stringified hex values.
    """
    impl = CHAR

    def load_dialect_impl(self, dialect):
        # Native UUID column on Postgres; 32-char hex string elsewhere.
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(pg.UUID())
        else:
            return dialect.type_descriptor(CHAR(32))

    def process_bind_param(self, value, dialect):
        if value is None:
            return value
        elif dialect.name == 'postgresql':
            return str(value)
        else:
            if not isinstance(value, UUID):
                value = UUID(value)
            # BUG FIX: the original formatted the UUID object directly
            # ("%.32x" % value), which raises TypeError because a UUID is
            # not an integer. Format its 128-bit integer value instead,
            # matching the SQLAlchemy GUID recipe.
            return "%.32x" % value.int

    def process_result_value(self, value, dialect):
        if value is None:
            return value
        else:
            return UUID(value)

    def python_type(self):
        # NOTE(review): TypeDecorator.python_type is a property upstream;
        # this is defined as a plain method -- confirm intended usage.
        return UUID
class Annotation(annotation.Annotation):
    """Elasticsearch-backed annotation model.

    ``__mapping__`` declares the Elasticsearch field mappings and
    ``__settings__`` the index analysis settings (a ``thread`` analyzer
    used to match annotations anywhere in a reply thread).
    """
    __mapping__ = {
        'annotator_schema_version': {'type': 'string'},
        'created': {'type': 'date'},
        'updated': {'type': 'date'},
        'quote': {'type': 'string'},
        'tags': {'type': 'string', 'index_name': 'not_analyzed'},
        'text': {'type': 'string'},
        'deleted': {'type': 'boolean'},
        'uri': {'type': 'string', 'index': 'not_analyzed'},
        'user': {'type': 'string', 'index': 'not_analyzed'},
        'consumer': {'type': 'string', 'index': 'not_analyzed'},
        'target': {
            'properties': {
                'id': {
                    'type': 'multi_field',
                    'path': 'just_name',
                    'fields': {
                        'id': {'type': 'string', 'index': 'not_analyzed'},
                        'uri': {'type': 'string', 'index': 'not_analyzed'},
                    },
                },
                'source': {
                    'type': 'multi_field',
                    'path': 'just_name',
                    'fields': {
                        'source': {'type': 'string', 'index': 'not_analyzed'},
                        'uri': {'type': 'string', 'index': 'not_analyzed'},
                    },
                },
                'selector': {
                    'properties': {
                        'type': {'type': 'string', 'index': 'no'},
                        # Annotator XPath+offset selector
                        'startContainer': {'type': 'string', 'index': 'no'},
                        'startOffset': {'type': 'long', 'index': 'no'},
                        'endContainer': {'type': 'string', 'index': 'no'},
                        'endOffset': {'type': 'long', 'index': 'no'},
                        # Open Annotation TextQuoteSelector
                        'exact': {
                            'type': 'multi_field',
                            'path': 'just_name',
                            'fields': {
                                'exact': {'type': 'string'},
                                'quote': {'type': 'string'},
                            },
                        },
                        'prefix': {'type': 'string'},
                        'suffix': {'type': 'string'},
                        # Open Annotation (Data|Text)PositionSelector
                        'start': {'type': 'long'},
                        'end': {'type': 'long'},
                    }
                }
            }
        },
        'permissions': {
            'index_name': 'permission',
            'properties': {
                'read': {'type': 'string', 'index': 'not_analyzed'},
                'update': {'type': 'string', 'index': 'not_analyzed'},
                'delete': {'type': 'string', 'index': 'not_analyzed'},
                'admin': {'type': 'string', 'index': 'not_analyzed'}
            }
        },
        'references': {'type': 'string', 'index': 'not_analyzed'},
        'document': {
            'properties': document.MAPPING
        },
        'thread': {
            'type': 'string',
            'analyzer': 'thread'
        }
    }
    __settings__ = {
        'analysis': {
            'analyzer': {
                # Tokenizes thread paths hierarchically so a query for an
                # ancestor id also matches descendant annotations.
                'thread': {
                    'tokenizer': 'path_hierarchy'
                }
            }
        }
    }

    @classmethod
    def update_settings(cls):
        """Apply ``__settings__`` to the index.

        The index must be closed while analysis settings change; the
        ``finally`` guarantees it is reopened even if the update fails.
        """
        cls.es.conn.indices.close(index=cls.es.index)
        try:
            cls.es.conn.indices.put_settings(
                index=cls.es.index,
                body=getattr(cls, '__settings__', {})
            )
        finally:
            cls.es.conn.indices.open(index=cls.es.index)
class Document(document.Document):
    """Annotator-store document model; no behavior added over the base class."""
    pass
class ConsumerMixin(BaseModel):
    """
    API Consumer

    The annotator-store :py:class:`annotator.auth.Authenticator` uses this
    function in the process of authenticating requests to verify the secrets of
    the JSON Web Token passed by the consumer client.
    """
    # API key identifying the consumer; uuid1 with a per-process clock_seq.
    key = Column(GUID, default=partial(uuid1, clock_seq=id(Base)), index=True)
    # Shared secret used to verify the consumer's JWTs.
    secret = Column(GUID, default=uuid4)
    # Token time-to-live.
    ttl = Column(Integer, default=DEFAULT_TTL)

    def __init__(self, **kwargs):
        super(ConsumerMixin, self).__init__()
        # NOTE(review): writes straight into __dict__, bypassing SQLAlchemy
        # attribute instrumentation -- confirm this is intentional.
        self.__dict__.update(kwargs)

    def __repr__(self):
        return '<Consumer %r>' % self.key

    @classmethod
    def get_by_key(cls, key):
        """Return the consumer with the given API key, or None."""
        return Session().query(cls).filter(cls.key == key).first()
class Activation(ActivationMixin, Base):
    """Concrete account-activation table from the horus mixin."""
    pass
class Consumer(ConsumerMixin, Base):
    """Concrete API-consumer table (see ConsumerMixin)."""
    pass
class Group(GroupMixin, Base):
    """Concrete group table from the horus mixin."""
    pass
class User(UserMixin, Base):
    @declared_attr
    def subscriptions(self):
        # Boolean flag column; defaults to off.
        return sa.Column(sa.BOOLEAN, nullable=False, default=False)

    @classmethod
    def get_by_username(cls, request, username):
        """Find a user by username, ignoring dots and letter case."""
        session = get_session(request)
        stored = func.replace(cls.username, '.', '')
        wanted = username.replace('.', '')
        return session.query(cls).filter(
            func.lower(stored) == wanted.lower()
        ).first()

    @classmethod
    def get_by_username_or_email(cls, request, username, email):
        """Find a user by dot/case-insensitive username or by exact email."""
        session = get_session(request)
        stored = func.replace(cls.username, '.', '')
        wanted = username.replace('.', '')
        return session.query(cls).filter(
            or_(
                func.lower(stored) == wanted.lower(),
                cls.email == email
            )
        ).first()
class UserGroup(UserGroupMixin, Base):
    """Concrete user-group membership table from the horus mixin."""
    pass
class UserSubscriptions(BaseModel, Base):
    """A saved search/notification subscription belonging to a user."""

    @declared_attr
    def username(self):
        # FK to the users table; follows renames/deletes of the user row.
        return sa.Column(
            sa.Unicode(30),
            sa.ForeignKey(
                '%s.%s' % (UserMixin.__tablename__, 'username'),
                onupdate='CASCADE',
                ondelete='CASCADE'
            ),
            nullable=False
        )

    @declared_attr
    def query(self):
        # The subscription's query, stored as JSON text (up to 4096 chars).
        return sa.Column(JSONEncodedDict(4096), nullable=False)

    @declared_attr
    def template(self):
        # Which template applies to this subscription.
        return sa.Column(sa.Enum('reply_notification', 'custom_search'), nullable=False, default='custom_search')

    @declared_attr
    def description(self):
        # Free-form description; empty by default.
        return sa.Column(sa.VARCHAR(256), default="")

    @declared_attr
    def type(self):
        # Origin of the subscription: system-created or user-created.
        return sa.Column(sa.Enum('system', 'user'), nullable=False, default='user')

    @declared_attr
    def active(self):
        # Presumably toggles whether the subscription fires -- confirm usage.
        return sa.Column(sa.BOOLEAN, default=True, nullable=False)
def groupfinder(userid, request):
    """Return the authorization principals for the request's user.

    Returns None when no user is attached to the request; otherwise a
    list with one 'group:<name>' entry per group plus the user's
    'acct:<username>@<server>' principal.
    """
    user = request.user
    if not user:
        return None
    principals = ['group:%s' % group.name for group in user.groups]
    principals.append('acct:%s@%s' % (user.username, request.server_name))
    return principals
def includeme(config):
    """Pyramid ``includeme``: wire persistence, ``request.user``, default
    utility registrations, and ensure an API consumer row exists for the
    configured key.
    """
    registry = config.registry
    config.include('pyramid_basemodel')
    config.include('pyramid_tm')
    # Attach the user object to each request as request.user.
    config.set_request_property(lib.user_property, 'user')
    # Register defaults only where the application has not overridden them.
    if not registry.queryUtility(interfaces.IDBSession):
        registry.registerUtility(Session, interfaces.IDBSession)
    if not registry.queryUtility(interfaces.IUserClass):
        registry.registerUtility(User, interfaces.IUserClass)
    if not registry.queryUtility(interfaces.IConsumerClass):
        registry.registerUtility(Consumer, interfaces.IConsumerClass)
    if not registry.queryUtility(interfaces.IActivationClass):
        registry.registerUtility(Activation, interfaces.IActivationClass)
    settings = config.get_settings()
    key = settings['api.key']  # required setting -- raises KeyError if absent
    secret = settings.get('api.secret')
    ttl = settings.get('api.ttl', DEFAULT_TTL)
    # Create the consumer row for this key if missing, then sync its
    # secret/ttl from settings, inside a managed transaction.
    session = Session()
    with transaction.manager:
        consumer = Consumer.get_by_key(key)
        if not consumer:
            consumer = Consumer(key=key)
        consumer.secret = secret
        consumer.ttl = ttl
        session.add(consumer)
        session.flush()
        registry.consumer = consumer
|
import finding_hidden_messages_in_dna
import sys
def main():
    """Read (pattern, text, d) from input.txt and write to output.txt the
    count and positions of approximate occurrences of pattern in text."""
    # Use context managers so both files are closed even on error; the
    # original leaked both handles and permanently redirected sys.stdout.
    with open('input.txt', 'r') as input_file, \
            open('output.txt', 'w') as output_file:
        pattern = input_file.readline().strip()
        text = input_file.readline().strip()
        expected_hamming_distance = int(input_file.readline().strip())
        approximate_occurrences_list = finding_hidden_messages_in_dna.find_all_approximate_occurrences_of_pattern_with_given_hamming_distance_in_text(
            text, pattern, expected_hamming_distance)
        print("Count:", len(approximate_occurrences_list), file=output_file)
        for index in approximate_occurrences_list:
            print(index, end=" ", file=output_file)


if __name__ == "__main__":
    main()
|
# coding: utf-8
# In[1]:
from magma import *
class FullAdder(Circuit):
    """One-bit full adder: out = a ^ b ^ cin, cout = majority(a, b, cin)."""
    name = "FullAdder"
    # Circuit interface: a flat list of alternating (port name, direction/type).
    IO = ["a", In(Bit), "b", In(Bit), "cin", In(Bit), "out", Out(Bit), "cout", Out(Bit)]

    @classmethod
    def definition(io):
        # NOTE(review): magma convention -- the decorated method receives the
        # circuit IO bundle as its sole argument (not cls); confirm against
        # the magma version in use.
        # Generate the sum
        _sum = io.a ^ io.b ^ io.cin
        wire(_sum, io.out)
        # Generate the carry
        carry = (io.a & io.b) | (io.b & io.cin) | (io.a & io.cin)
        wire(carry, io.cout)
# In[2]:
# Compile the circuit to Verilog and show the generated source.
from magma.backend.verilog import compile as compile_verilog
print(compile_verilog(FullAdder))
# In[3]:
# Simulate the circuit over all 8 input combinations; each expected row
# is [a, b, cin, out, cout].
from magma.simulator.python_simulator import testvectors
test_vectors = [
    [0, 0, 0, 0, 0],
    [0, 0, 1, 1, 0],
    [0, 1, 0, 1, 0],
    [0, 1, 1, 0, 1],
    [1, 0, 0, 1, 0],
    [1, 0, 1, 0, 1],
    [1, 1, 0, 0, 1],
    [1, 1, 1, 1, 1]
]
tests = testvectors(FullAdder)
print(tests)
print( "Success" if tests == test_vectors else "Failure" )
# In[4]:
# Render the simulated signals as a waveform plot.
from magma.waveform import waveform
waveform(tests, ["a", "b", "cin", "sum", "cout"])
|
import json
from django.core.serializers import serialize
from django.db.models.query import QuerySet
from django.template import Library
register = Library()


@register.filter()
def jsonify(obj):
    """Template filter: serialize a QuerySet with Django's JSON serializer,
    anything else with json.dumps."""
    if isinstance(obj, QuerySet):
        return serialize('json', obj)
    return json.dumps(obj)
|
from unittest import TestCase
from mock import patch, Mock
from tables.rows.builders import MovieSearchRowBuilder
class MockMovie:
    """Minimal stand-in for a movie record used by the row-builder tests."""

    def __init__(self, name, rotten_tomatoes_score, year, cast):
        self.name = name
        self.rotten_tomatoes_score = rotten_tomatoes_score
        self.year = year
        self.cast = cast
class TestMovieSearchRowBuilderCast(TestCase):
    """Tests for MovieSearchRowBuilder.cast."""
    row_builder = MovieSearchRowBuilder()

    @patch("tables.rows.builders.convert_to_ascii")
    def test_cast(self, mock_ascii_conversion):
        # Each cast member is ascii-converted and the results are joined
        # with newlines; verify both the output and the conversion calls.
        mock_ascii_conversion.return_value = "converted to ascii"
        cast = ["actor1", "actor2"]
        expected = "converted to ascii\nconverted to ascii"
        self.assertEqual(expected, self.row_builder.cast(cast=cast))
        mock_ascii_conversion.assert_any_call(text="actor1")
        mock_ascii_conversion.assert_any_call(text="actor2")
class TestMovieSearchRowBuilderName(TestCase):
    """Tests for MovieSearchRowBuilder.name."""
    row_builder = MovieSearchRowBuilder()

    @patch("tables.rows.builders.convert_to_ascii")
    @patch("tables.rows.builders.colored")
    @patch("tables.rows.builders.wrap")
    def test_name(self, mock_wrap, mock_colored, mock_ascii_conversion):
        # Expected pipeline: ascii-convert, wrap to width 30, bold-color
        # each wrapped part, then join the parts with newlines.
        name_parts = ["jae", "baebae"]
        mock_ascii_conversion.return_value = "converted to ascii"
        mock_colored.return_value = "colored"
        mock_wrap.return_value = name_parts
        expected = "colored\ncolored"
        self.assertEqual(expected, self.row_builder.name(name="name"))
        mock_ascii_conversion.assert_called_once_with(text="name")
        mock_wrap.assert_called_once_with(text="converted to ascii", width=30)
        mock_colored.assert_any_call("jae", attrs=["bold"])
        mock_colored.assert_any_call("baebae", attrs=["bold"])
class TestMovieSearchRowBuilderBuild(TestCase):
    """Tests for MovieSearchRowBuilder.build."""
    row_builder = MovieSearchRowBuilder()

    def test_build(self):
        # Stub out the collaborators (name, rating formatter, cast) and
        # verify build() assembles their outputs in row order:
        # [name, rating, year, cast].
        year = "year"
        name = "name"
        cast = "cast"
        rotten_tomatoes_score = "rotten tomatoes score"
        movie = MockMovie(name=name, rotten_tomatoes_score=rotten_tomatoes_score, year=year, cast=cast)
        rating_formatter = "rating formatter"
        self.row_builder.name = Mock("name")
        self.row_builder.name.return_value = name
        self.row_builder.rating_formatter.format = Mock("rating_formatter")
        self.row_builder.rating_formatter.format.return_value = rating_formatter
        self.row_builder.cast = Mock("cast")
        self.row_builder.cast.return_value = cast
        expected = [name, rating_formatter, year, cast]
        self.assertEqual(expected, self.row_builder.build(movie=movie))
        # Each collaborator must be called exactly once with the movie's data.
        self.row_builder.name.assert_called_once_with(name=name)
        self.row_builder.rating_formatter.format.assert_called_once_with(rating=rotten_tomatoes_score)
        self.row_builder.cast.assert_called_once_with(cast=cast)
|
#!/usr/bin/env python3
"""
A python package for climate scientists.
"""
# Global constants
# NOTE: Keep databases here so that autoreload doesn't break everything
# Registries of derivation and transformation handlers; presumably filled
# in by the submodules imported below -- TODO confirm.
DERIVATIONS = {}
TRANSFORMATIONS = {}
# Import functions to top-level. Recommended syntax for registries is one of:
# NOTE: Submodules are for organization and should not be accessible by users
import pkg_resources as _pkg
from .unit import * # noqa: F401, F403
from .cfvariable import * # noqa: F401 F403
from .accessor import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
from .diff import * # noqa: F401, F403
from .var import * # noqa: F401, F403
from .waves import * # noqa: F401, F403
from .spectral import * # noqa: F401, F403
from .spherical import * # noqa: F401, F403
from .downloads import * # noqa: F401, F403
from . import const # noqa: F401
from . import context # noqa: F401
from . import internals # noqa: F401
from . import definitions # noqa: F401
# SCM versioning
# Resolve the installed distribution's version from setuptools metadata;
# fall back to 'unknown' when the package is not installed (e.g. running
# from a bare source checkout).
name = 'climopy'
try:
    version = __version__ = _pkg.get_distribution(__name__).version
except _pkg.DistributionNotFound:
    version = __version__ = 'unknown'
|
from .base_page import BasePage
from .locators import BasketPageLocators
class BasketPage(BasePage):
    """Page object for the basket page."""

    def should_be_empty_basket_message(self):
        """Assert the 'empty basket' message exists and has the expected text."""
        assert self.is_element_present(*BasketPageLocators.BASKET_EMPTY_MESSAGE), \
            "Empty basket message element not found on page"
        assert self.browser.find_element(*BasketPageLocators.BASKET_EMPTY_MESSAGE).text == "Your basket is empty. Continue shopping", \
            "Invalid Basket empty message"

    def should_be_empty_basket(self):
        """Assert the basket contains no items."""
        # Fixed typo in the assertion message: "Busket" -> "Basket".
        assert self.is_not_element_present(*BasketPageLocators.BASKET_ITEM_EXIST_SELECTOR), \
            "Basket is not empty, but should be"
|
# -*- coding: utf-8 -*-
"""Tests for HighRes3DNet."""
import numpy as np
import tensorflow as tf
from nobrainer.models.highres3dnet import HighRes3DNet
def test_highres3dnet():
    """Smoke-test HighRes3DNet training under three configurations."""
    shape = (1, 5, 5, 5)
    features = np.random.rand(*shape, 1).astype(np.float32)
    labels = np.random.randint(0, 9, size=shape, dtype=np.int32)

    def dset_fn():
        return tf.data.Dataset.from_tensors((features, labels))

    # Optimizer given by name.
    estimator = HighRes3DNet(
        n_classes=10,
        optimizer='Adam',
        learning_rate=0.001)
    estimator.train(input_fn=dset_fn)
    # With optimizer object.
    optimizer = tf.train.AdagradOptimizer(learning_rate=0.001)
    estimator = HighRes3DNet(
        n_classes=10,
        optimizer=optimizer,
        learning_rate=0.001)
    estimator.train(input_fn=dset_fn)
    # With one batchnorm layer per residually connected pair and dropout.
    estimator = HighRes3DNet(
        n_classes=10,
        optimizer='Adam',
        learning_rate=0.001,
        one_batchnorm_per_resblock=True,
        dropout_rate=0.25)
    estimator.train(input_fn=dset_fn)
|
import re
class LEDTester():
    """Simulate an N x N LED grid driven by textual commands.

    Commands are free-form strings containing one of "turn on", "turn off"
    or "switch" followed by two corner coordinates "r1,c1 through r2,c2".
    Coordinates are clamped to the grid bounds.
    """

    # Compiled once at class-definition time; raw string avoids the invalid
    # escape sequences (\s, \d) that a plain string literal produces.
    _COMMAND_PATTERN = re.compile(
        r".*(turn on|turn off|switch)\s*([+-]?\d+)\s*,\s*([+-]?\d+)\s*through\s*([+-]?\d+)\s*,\s*([+-]?\d+).*")

    def __init__(self, N):
        self.N = N
        # Grid of booleans; True means the LED is lit.
        self.dataGrid = [[False] * self.N for _ in range(self.N)]

    def apply(self, i):
        """Parse the command string *i* and apply it to the grid.

        Unrecognized input is silently ignored; the target rectangle is
        clamped to [0, N-1] in both dimensions.
        """
        commands = self._COMMAND_PATTERN.search(i)
        if commands is None:
            return
        method = commands.group(1)
        row1 = max(int(commands.group(2)), 0)
        col1 = max(int(commands.group(3)), 0)
        row2 = min(int(commands.group(4)), self.N - 1)
        col2 = min(int(commands.group(5)), self.N - 1)
        if method == "turn on":
            for r in range(row1, row2 + 1):
                for c in range(col1, col2 + 1):
                    self.dataGrid[r][c] = True
        elif method == "turn off":
            for r in range(row1, row2 + 1):
                for c in range(col1, col2 + 1):
                    self.dataGrid[r][c] = False
        elif method == "switch":
            for r in range(row1, row2 + 1):
                for c in range(col1, col2 + 1):
                    self.dataGrid[r][c] = not self.dataGrid[r][c]

    def count(self):
        """Return the number of lit LEDs."""
        return sum(row.count(True) for row in self.dataGrid)
|
from discord.errors import ClientException
class UnableToBuildAudioFileException(ClientException):
    """Raised when the bot is unable to build an audio file for playback."""

    def __init__(self, message):
        super().__init__(message)
class BuildingAudioFileTimedOutExeption(UnableToBuildAudioFileException):
    """
    Raised when the audio generation logic times out.

    Note: the misspelled class name ("Exeption") is kept for backward
    compatibility with existing callers.
    See: https://github.com/naschorr/hawking/issues/50
    """

    def __init__(self, message):
        super().__init__(message)
class MessageTooLongException(UnableToBuildAudioFileException):
    """Raised during the audio file build when the user's message is too long."""

    def __init__(self, message):
        super().__init__(message)
|
import peewee as pw
class Object1(pw.Model):
    # Optional free-text column. No Meta/database is declared here, so the
    # model is presumably bound elsewhere — TODO confirm.
    field_1 = pw.TextField(null=True)
class Object2(pw.Model):
    # Optional free-text column; same unbound-model caveat as Object1.
    field_2 = pw.TextField(null=True)
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates the description of a single label.
To determine which labels exist, run get_all_labels.py. This feature is only
available to DFP premium solution networks.
"""
# Import appropriate modules from the client library.
from googleads import dfp
LABEL_ID = 'INSERT_LABEL_ID_HERE'
def main(client, label_id):
  """Update the description of the label with the given id.

  Args:
    client: an initialized DfpClient.
    label_id: id of the label to update.
  """
  # Initialize appropriate service.
  label_service = client.GetService('LabelService', version='v201611')
  # Create a statement to select only the label with the given id.
  values = [
      {
          'key': 'labelId',
          'value': {
              'xsi_type': 'NumberValue',
              'value': label_id
          }
      }
  ]
  query = 'WHERE id = :labelId'
  statement = dfp.FilterStatement(query, values)
  # Get labels by filter.
  response = label_service.getLabelsByStatement(statement.ToStatement())
  if 'results' in response:
    # Update each local label object by changing the description.
    updated_labels = []
    for label in response['results']:
      label['description'] = 'These labels are updated.'
      updated_labels.append(label)
    # Update labels remotely.
    labels = label_service.updateLabels(updated_labels)
    for label in labels:
      print ('Label with id \'%s\' and name \'%s\' was updated.'
             % (label['id'], label['name']))
  else:
    # Parenthesized so this line is valid in both Python 2 and Python 3;
    # the original bare `print '...'` statement was Python-2-only.
    print('No labels found to update.')
if __name__ == '__main__':
  # Initialize client object.
  # Loads credentials and network settings from the googleads storage file.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, LABEL_ID)
|
"""
Tests for protocol interaction
"""
import pytest
from computable.helpers.transaction import call
def test_w3(w3):
    """
    Is web3 correctly setup for testing?
    """
    expected_default = w3.eth.accounts[0]
    assert w3.eth.defaultAccount == expected_default
def test_ether_token_deploy(ether_token):
    """
    did the ether token deploy correctly?
    """
    account, address = ether_token.account, ether_token.address
    assert len(account) == 42
    assert len(address) == 42
    assert account != address
def test_market_token_deploy(market_token_pre):
    """
    did the market token deploy correctly?
    """
    account, address = market_token_pre.account, market_token_pre.address
    assert len(account) == 42
    assert len(address) == 42
    assert account != address
def test_voting_deploy(voting_pre):
    """
    did the voting contract deploy correctly?
    """
    account, address = voting_pre.account, voting_pre.address
    assert len(account) == 42
    assert len(address) == 42
    assert account != address
def test_voting_is_candidate_falsy(w3, voting_pre):
    """A hash that was never registered must not be a candidate."""
    # Renamed local: the original shadowed the builtin `hash`.
    candidate_hash = w3.keccak(text='nope')
    # `is False` replaces the non-idiomatic `== False` comparison (E712).
    assert call(voting_pre.is_candidate(candidate_hash)) is False
def test_p11r_deploy(parameterizer):
    """
    did the p11r contract deploy correctly?
    """
    account, address = parameterizer.account, parameterizer.address
    assert len(account) == 42
    assert len(address) == 42
    assert account != address
def test_reserve_deploy(reserve):
    """
    did the reserve contract deploy correctly?
    """
    account, address = reserve.account, reserve.address
    assert len(account) == 42
    assert len(address) == 42
    assert account != address
def test_datatrust_deploy(datatrust_pre):
    """
    did the datatrust contract deploy correctly?
    """
    account, address = datatrust_pre.account, datatrust_pre.address
    assert len(account) == 42
    assert len(address) == 42
    assert account != address
def test_listing_deploy(listing):
    """
    did the listing contract deploy correctly?
    """
    assert len(listing.account) == 42
    assert len(listing.address) == 42
    assert listing.account != listing.address
def test_market_token_set_priv(market_token, reserve, listing):
    """Reserve and listing should hold market-token privilege, in that order."""
    privileged = call(market_token.get_privileged())
    assert privileged[0] == reserve.address
    assert privileged[1] == listing.address
def test_voting_set_priv(voting, parameterizer, reserve, datatrust_pre, listing):
    """Voting privilege holders: p11r, reserve, datatrust, listing — in order."""
    privileged = call(voting.get_privileged())
    assert privileged[0] == parameterizer.address
    assert privileged[1] == reserve.address
    assert privileged[2] == datatrust_pre.address
    assert privileged[3] == listing.address
def test_datatrust_set_priv(datatrust, listing):
    """The listing contract should be datatrust's privileged address."""
    privileged = call(datatrust.get_privileged())
    assert privileged == listing.address
|
from __future__ import unicode_literals
from django.apps import AppConfig
class SampleTicketConfig(AppConfig):
    """Django application configuration for the sample_ticket app."""
    name = 'sample_ticket'
|
import plotly.graph_objects as go
import pandas as pd
from datetime import *
from Constants import *
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
def dukascopy_filter_date(list_of_currencies, from_date, to_date):
    """Filter each dataframe to rows whose 'Gmt time' lies in [from_date, to_date].

    :param list_of_currencies: list of DataFrames with a 'Gmt time' column
        formatted '%d.%m.%Y %H:%M:%S.%f'
    :param from_date: inclusive lower bound, '%Y-%m-%d'
    :param to_date: inclusive upper bound, '%Y-%m-%d'
    :return: list of filtered DataFrames, in input order
    """
    filtered_by_data = []
    num = 0
    for df in list_of_currencies:
        # NOTE: the original called df.set_index('Gmt time') and discarded
        # the result (set_index is not in-place); that dead call was removed.
        df['Gmt time'] = pd.to_datetime(df['Gmt time'], format='%d.%m.%Y %H:%M:%S.%f')
        mask = (df['Gmt time'] >= datetime.strptime(from_date, '%Y-%m-%d')) & \
               (df['Gmt time'] <= datetime.strptime(to_date, '%Y-%m-%d'))
        df = df.loc[mask]
        print('\n')
        print('\n')
        num += 1
        filtered_by_data.append(df)
    print('Se han filtrado', num, 'dataframes de', len(list_of_currencies), 'posibles')
    return filtered_by_data
def correlation_dataframe(filtered_dataframes, graph_title):
    """Plot a lower-triangle correlation heatmap of the 'Open' columns.

    Each dataframe's 'Open' column is renamed to the matching entry of the
    global ``currencies_list`` (imported from Constants), so the input list
    is assumed to follow that order — TODO confirm.
    """
    column_selected_df_list = []
    for i, df in enumerate(filtered_dataframes):
        df = df.rename(columns={'Open': currencies_list[i]})
        df = df.reset_index(drop=True)
        column_selected_df_list.append(df[currencies_list[i]])
    corr_df = pd.concat(column_selected_df_list, axis=1)
    # NOTE: the original took a full no-op slice (iloc[:, :]) here; removed.
    correlation_map = corr_df.corr()
    ax = plt.axes()
    # Mask the upper triangle so each currency pair appears only once.
    mask = np.triu(np.ones_like(correlation_map, dtype=bool))
    cmap = matplotlib.colors.LinearSegmentedColormap.from_list("", colors)
    sns.heatmap(correlation_map, vmin=-1, vmax=1, mask=mask, ax=ax, annot=True, linewidths=1.5,
                cmap=cmap, annot_kws={'size': 30})
    plt.text(0, 0.1, graph_title, fontsize=20, color='Black', fontstyle='normal')
    plt.show()
def candlestick_print_2_annotations(dataframe_list, annotation_date_1, annotation_date_2, annotation_text_1, annotation_text_2):
    """Draw one candlestick chart per dataframe with two labelled vertical
    marker lines at the given dates.

    Titles come positionally from the global ``currencies_list`` (imported
    from Constants), so dataframe_list is assumed to follow that order —
    TODO confirm.
    """
    num = 0
    for df in dataframe_list:
        fig = go.Figure(
            data=[go.Candlestick(x=df['Gmt time'],
                                 open=df['Open'],
                                 high=df['High'],
                                 low=df['Low'],
                                 close=df['Close'])])
        fig.update_layout(
            title=currencies_list[num],
            xaxis=dict(tickfont=dict(size=25)),
            yaxis=dict(tickfont=dict(size=25)),
            xaxis_rangeslider_visible=False,
            title_font_size=60,
            # Full-height vertical marker lines at each annotation date.
            shapes=[dict(x0=annotation_date_1, x1=annotation_date_1, y0=0, y1=1, xref='x', yref='paper', line_width=2),
                    dict(x0=annotation_date_2, x1=annotation_date_2, y0=0, y1=1, xref='x', yref='paper', line_width=2),],
            # Labels anchored on opposite sides of their marker lines.
            annotations=[dict(x=annotation_date_1, y=0.05, xref='x', yref='paper', showarrow=False, xanchor='right', text=annotation_text_1),
                         dict(x=annotation_date_2, y=0.05 , xref='x', yref='paper', showarrow=False, xanchor='left', text=annotation_text_2)])
        fig.update_annotations(font_size=25)
        num += 1
        fig.show()
    print('Se han hecho', num, 'gráficas de', len(dataframe_list), 'posibles')
def candlestick_print_3_annotations(dataframe_list, annotation_date_1, annotation_date_2, annotation_date_3,
                                    annotation_text_1, annotation_text_2, annotation_text_3):
    """Draw one candlestick chart per dataframe with three labelled vertical
    marker lines at the given dates.

    Titles come positionally from the global ``currencies_list`` (imported
    from Constants) — dataframe_list is assumed to follow that order.
    """
    num = 0
    for df in dataframe_list:
        fig = go.Figure(
            data=[go.Candlestick(x=df['Gmt time'],
                                 open=df['Open'],
                                 high=df['High'],
                                 low=df['Low'],
                                 close=df['Close'])])
        fig.update_layout(
            title=currencies_list[num],
            xaxis=dict(tickfont=dict(size=25)),
            yaxis=dict(tickfont=dict(size=25)),
            xaxis_rangeslider_visible=False,
            title_font_size=60,
            # Full-height vertical marker lines at each annotation date.
            shapes=[dict(x0=annotation_date_1, x1=annotation_date_1, y0=0, y1=1, xref='x', yref='paper', line_width=2),
                    dict(x0=annotation_date_2, x1=annotation_date_2, y0=0, y1=1, xref='x', yref='paper', line_width=2),
                    dict(x0=annotation_date_3, x1=annotation_date_3, y0=0, y1=1, xref='x', yref='paper', line_width=2)],
            # First label near the top, the others near the bottom.
            annotations=[dict(x=annotation_date_1, y=0.95, xref='x', yref='paper', showarrow=False, xanchor='right', text=annotation_text_1),
                         dict(x=annotation_date_2, y=0.05, xref='x', yref='paper', showarrow=False, xanchor='right', text=annotation_text_2),
                         dict(x=annotation_date_3, y=0.05, xref='x', yref='paper', showarrow=False, xanchor='left', text=annotation_text_3)])
        fig.update_annotations(font_size=25)
        num += 1
        fig.show()
    print('Se han hecho', num, 'gráficas de', len(dataframe_list), 'posibles')
def candlestick_print_4_annotations(dataframe_list,
                                    annotation_date_1, annotation_date_2, annotation_date_3, annotation_date_4,
                                    annotation_text_1, annotation_text_2, annotation_text_3, annotation_text_4):
    """Draw one candlestick chart per dataframe with four labelled vertical
    marker lines at the given dates.

    Titles come positionally from the global ``currencies_list`` (imported
    from Constants) — dataframe_list is assumed to follow that order.
    """
    num = 0
    for df in dataframe_list:
        fig = go.Figure(
            data=[go.Candlestick(x=df['Gmt time'],
                                 open=df['Open'],
                                 high=df['High'],
                                 low=df['Low'],
                                 close=df['Close'])])
        fig.update_layout(
            title=currencies_list[num],
            xaxis_rangeslider_visible=False,
            xaxis=dict(tickfont=dict(size=30)),
            yaxis=dict(tickfont=dict(size=30)),
            title_font_size=60,
            # Full-height vertical marker lines at each annotation date.
            shapes=[dict(x0=annotation_date_1, x1=annotation_date_1, y0=0, y1=1, xref='x', yref='paper', line_width=2),
                    dict(x0=annotation_date_2, x1=annotation_date_2, y0=0, y1=1, xref='x', yref='paper', line_width=2),
                    dict(x0=annotation_date_3, x1=annotation_date_3, y0=0, y1=1, xref='x', yref='paper', line_width=2),
                    dict(x0=annotation_date_4, x1=annotation_date_4, y0=0, y1=1, xref='x', yref='paper', line_width=2)],
            # Alternate top/bottom, right/left anchoring to avoid overlap.
            annotations=[dict(x=annotation_date_1, y=0.95, xref='x', yref='paper', showarrow=False, xanchor='right',text=annotation_text_1),
                         dict(x=annotation_date_2, y=0.01, xref='x', yref='paper', showarrow=False, xanchor='left', text=annotation_text_2),
                         dict(x=annotation_date_3, y=0.95, xref='x', yref='paper', showarrow=False, xanchor='right',text=annotation_text_3),
                         dict(x=annotation_date_4, y=0.01, xref='x', yref='paper', showarrow=False, xanchor='left', text=annotation_text_4)])
        fig.update_annotations(font_size=30)
        num += 1
        fig.show()
    print('Se han hecho', num, 'gráficas de', len(dataframe_list), 'posibles')
"""mdsp_color_sr dataset."""
import tensorflow_datasets as tfds
from . import mdsp_color_sr
class MdspColorSrTest(tfds.testing.DatasetBuilderTestCase):
    """Tests for mdsp_color_sr dataset."""
    DATASET_CLASS = mdsp_color_sr.MdspColorSr
    # Expected example counts per split in the dummy data.
    SPLITS = {
        "test": 1,
    }
    # Maps fake download keys to files in the dummy-data directory.
    DL_EXTRACT_RESULT = {"face_adyoron_1": "face_adyoron_1.mat"}
if __name__ == "__main__":
    # Delegate test discovery and running to the TFDS test harness.
    tfds.testing.test_main()
|
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import pickle
from scipy import linalg
from scipy import polyfit
sns.set_context("talk")
def add_season(df, copy=False):
    """
    Add a 'season' column (DJF/MAM/JJA/SON) derived from df's DatetimeIndex.

    copy: Boolean, default False
        when True, return a modified copy and leave df untouched;
        when False, mutate df in place and return None.
    """
    # Index 0 is a placeholder so month numbers (1-12) map directly.
    month_to_season = np.array([
        None,
        'DJF', 'DJF',
        'MAM', 'MAM', 'MAM',
        'JJA', 'JJA', 'JJA',
        'SON', 'SON', 'SON',
        'DJF'
    ])
    target = df.copy() if copy else df
    target["season"] = month_to_season[df.index.month]
    if copy:
        return target
    return None
def renameIndex():
    """Return the mapping from raw PO_* column names to display labels."""
    return {
        "PO_Bio._burning": "Bio. burning",
        "PO_Industrial": "Industrial",
        "PO_Mineral_dust": "Mineral dust",
        "PO_Nitrate_rich": "Nitrate-rich",
        "PO_Primary_bio": "Primary bio",
        "PO_Sea_road_salt": "Sea/road salt",
        "PO_Secondary_bio": "Secondary bio",
        "PO_Sulfate_rich": "Sulfate-rich",
        "PO_Vehicular": "Vehicular",
        "PO_AOS_dust": "AOS/dust",
        "PO_Débris_végétaux": "Débris végétaux",
        "PO_Chlorure": "Chlorure"
    }
def sitesColor():
    """
    Colors for the sites. Follows mpl.colors.TABLEAU_COLORS
    """
    site_colors = {
        "Marnaz": "#1f77b4",
        "Passy": "#ff7f0e",
        "Chamonix": "#2ca02c",
        "Frenes": "#d62728",
        "Nice": "#9467bd",
        "PdB": "#8c564b",
        "Marseille": "#e377c2"
    }
    # Single-row frame so colors are looked up via .loc["color", site].
    return pd.DataFrame(index=["color"], data=site_colors)
def sourcesColor():
    """Return a one-row DataFrame mapping source names (and their many
    spelling variants) to plot colors; look up via .loc["color", name].

    Fix: the original dict literal listed the "Vehicular" key twice.
    """
    color = {
        "Vehicular": "#000000",
        "VEH": "#000000",
        "VEH ind": "#111111",
        "Vehicular_ind": "#111111",
        "Vehicular ind": "#111111",
        "VEH dir": "#333333",
        "Vehicular_dir": "#333333",
        "Vehicular dir": "#333333",
        "Oil/Vehicular": "#000000",
        "Road traffic": "#000000",
        "Bio_burning": "#92d050",
        "Bio_burning1": "#92d050",
        "Bio burning1": "#92d050",
        "Bio_burning2": "#bbd020",
        "Bio burning2": "#bbd020",
        "Bio. burning": "#92d050",
        "Bio burning": "#92d050",
        "BB": "#92d050",
        "BB1": "#92d050",
        "BB2": "#bbd020",
        "Sulfate_rich": "#ff2a2a",
        "Sulfate-rich": "#ff2a2a",
        "Sulfate rich": "#ff2a2a",
        "Nitrate_rich": "#ff7f2a",
        "Nitrate-rich": "#ff7f2a",
        "Nitrate rich": "#ff7f2a",
        "Secondaire": "#ff5f2a",
        "Secondary_bio": "#8c564b",
        "Secondary bio": "#8c564b",
        "Secondary biogenic": "#8c564b",
        "Marine_bio/HFO": "#8c564b",
        "Marine biogenic/HFO": "#8c564b",
        "Marine bio/HFO": "#8c564b",
        "Marin bio/HFO": "#8c564b",
        "Marine_bio": "#fc564b",
        "Marine bio": "#fc564b",
        "Marine secondary": "#fc564b",
        "Marin secondaire": "#fc564b",
        "HFO": "#70564b",
        "Marine": "#33b0f6",
        "Marin": "#33b0f6",
        "Salt": "#00b0f0",
        "Sea/road salt": "#00b0f0",
        "Sea salt": "#00b0f0",
        "Aged_salt": "#00b0ff",
        "Aged salt": "#00b0ff",
        "Aged sea salt": "#00b0ff",
        "Aged seasalt": "#00b0ff",
        "Primary_bio": "#ffc000",
        "Primary bio": "#ffc000",
        "Primary biogenic": "#ffc000",
        "Biogenique": "#ffc000",
        "Biogenic": "#ffc000",
        "Dust": "#dac6a2",
        "Dust (mineral)": "#dac6a2",
        "Mineral dust": "#dac6a2",
        "Resuspended dust": "#dac6a2",
        "AOS/dust": "#dac6a2",
        "Industrial": "#7030a0",
        "Indus._veh.": "#7030a0",
        "Industry/vehicular": "#7030a0",
        "Arcellor": "#7030a0",
        "Siderurgie": "#7030a0",
        "Plant_debris": "#2aff80",
        "Débris végétaux": "#2aff80",
        "Choride": "#80e5ff",
        "Chlorure": "#80e5ff",
        "Other": "#cccccc",
        "PM other": "#cccccc",
        "nan": "#ffffff"
    }
    color = pd.DataFrame(index=["color"], data=color)
    return color
def plot_corr(df,title=None, alreadyDone=False, ax=None, **kwarg):
    """
    Plot the correlation heatmap of df.
    This function use the seaborn heatmap function and simply rotate the labels
    on the axes.

    :param df: raw data (correlated here), or an already-computed
        correlation matrix when alreadyDone is True.
    :param ax: optional matplotlib axes; created when omitted.
    :return: the axes the labels were set on.
    """
    if ax is None:
        f, ax = plt.subplots()
        kwarg["ax"] = ax
    # NOTE(review): when a caller passes its own `ax`, it is not forwarded
    # to sns.heatmap via kwarg — heatmap then draws on the current axes.
    # Confirm this is intended.
    if "vmax" not in kwarg:
        kwarg["vmax"]=1
    if "square" not in kwarg:
        kwarg["square"]=True
    if alreadyDone:
        sns.heatmap(df,**kwarg)
    else:
        sns.heatmap(df.corr(),**kwarg)
    # Reversed index order presumably mirrors heatmap's top-to-bottom row
    # layout — verify against the seaborn version in use.
    ax.set_yticklabels(df.index[::-1],rotation=0)
    ax.set_xticklabels(df.columns,rotation=-90)
    if title is not None:
        ax.set_title(title)
    elif hasattr(df,"name"):
        ax.set_title(df.name)
    return ax
def plot_scatterReconsObs(ax, obs, model, p, r2):
    """
    Scatter plot between the observation and the model, with a y=x guide
    line and the linear fit annotated with its equation and r².
    """
    pd.concat((obs, model), axis=1).plot(ax=ax, x=[0], y=[1], kind="scatter")
    obs_max = obs.max()
    plt.plot([0, obs_max], [0, obs_max], '--', label="y=x")
    plt.plot([0, obs_max], [p[1], p[0] * obs_max + p[1]], label="linear fit")
    text_y = 0.7 * plt.ylim()[1]
    plt.text(0, text_y, "y=%.2fx+%0.2f\nr²=%0.2f" % (p[0], p[1], r2[0, 1]))
    plt.xlabel("Obs.")
    plt.ylabel("Recons.")
    plt.title("obs. vs reconstruction")
    # Force a square-looking plot regardless of data ranges.
    ax.set_aspect(1. / ax.get_data_ratio())
    legend = plt.legend(loc="lower right")
    legend.draw_frame(False)
def plot_station_sources(station,**kwarg):
    """
    Plot the mass contrib (piechart), the scatter plot obs/recons, the
    intrinsic PO and the contribution of the sources/species (TS + piechart).

    :param station: a Station object exposing pieCHEM, PO, model, p and r2.
    TODO: it's ugly...
    """
    plt.figure(figsize=(17,8))
    # Mass contribution (pie chart)
    ax=plt.subplot(2,3,1)
    plot_contribPie(ax, station.pieCHEM)
    # Bar plot of coeff for the PO
    ax=plt.subplot(2,3,2)
    plot_coeff(station,ax)
    plt.ylabel("PO [nmol/min/µg]")
    # Scatter plot obs/recons.
    ax=plt.subplot(2,3,3)
    plot_scatterReconsObs(ax, station.PO, station.model, station.p, station.r2)
    # time serie reconstruction/observation (spans two grid cells)
    ax=plt.subplot(2,3,(4,5))
    plot_ts_reconstruction_PO(station,ax=ax)
    plt.legend(mode="expand", bbox_to_anchor=(0.5,-0.1))
    # PO contribution (pie chart)
    ax=plt.subplot(2,3,6)
    plot_contribPie(ax, station)
    plt.subplots_adjust(top=0.95, bottom=0.16, left=0.07, right=0.93)
def plot_station(station,POtype,**kwarg):
    """
    Plot the time series obs & recons, the scatter plot obs/recons, the
    intrinsic PO and the contribution of the sources/species.

    :param station: a Station object (PO, POunc, model, p, r2, name).
    :param POtype: PO label used in the title and y-axis (e.g. "DTTv").
    """
    plt.figure(figsize=(17,8))
    # time serie reconstruction/observation (spans the top row)
    ax=plt.subplot(2,3,(1,3))
    ax.errorbar(station.PO.index.to_pydatetime(), station.PO,
                yerr=station.POunc,
                ecolor="black",
                elinewidth=1,
                fmt="b-o",
                markersize=6,
                label="Obs.",
                zorder=1)
    ax.plot_date(station.model.index.to_pydatetime(), station.model, "r-*",
                 label="Recons.",zorder=10)
    ax.set_ylabel("{PO} loss\n[nmol/min/m³]".format(PO=POtype[:-1]))
    plt.title("{station} {POt}".format(station=station.name, POt=POtype))
    # Sort the legend entries alphabetically so "Obs." comes first.
    handles, labels = ax.get_legend_handles_labels()
    labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))
    l=ax.legend(handles, labels) # in order to have Obs 1st
    l.draw_frame(False)
    # scatter plot reconstruction/observation
    ax=plt.subplot(2,3,4)
    plot_scatterReconsObs(ax, station.PO, station.model, station.p, station.r2)
    # factors contribution
    ax=plt.subplot(2,3,5)
    plot_coeff(station, ax=ax)
    plt.ylabel("PO [nmol/min/µg]")
    # Pie chart
    ax=plt.subplot(2,3,6)
    plot_contribPie(ax, station,**kwarg)
    plt.subplots_adjust(top=0.95, bottom=0.16, left=0.07, right=0.93)
def plot_contribPie(ax, station, fromSource=True, title=None, ylabel=None,
                    **kwarg):
    """
    Plot contributions of the sources to the PO in a Pie chart
    The contributions is G*m.

    :param station: a Station object (uses .hasPO and .pie) or a pd.Series
        of contributions.
    :param fromSource: color the wedges with the per-source palette.
    """
    # check if station is an object or a DataFrame
    if isinstance(station, pd.Series):
        df = station
    else:
        if not(station.hasPO):
            # No PO data: fall back to plotting the raw pie without colors.
            ax.set_aspect('equal')
            station.pie.plot.pie(ax=ax, **kwarg)
            ax.set_ylabel("")
            return
        df = station.pie
    # Prettify the labels ("Bio_burning" -> "Bio burning").
    df.index = [a.replace("_", " ") for a in df.index]
    ax.set_aspect('equal')
    if fromSource:
        c = sourcesColor()
        # .loc replaces the DataFrame.ix indexer removed from pandas.
        cols = c.loc["color", df.index].values
        df.plot.pie(ax=ax,
                    shadow=False,
                    startangle=90,
                    colors=cols,
                    **kwarg)
    else:
        df.plot.pie(ax=ax,
                    shadow=False,
                    startangle=90,
                    **kwarg)
    # NOTE: a leftover debug print of the plot artists and a large block of
    # commented-out annotation code were removed here.
    if title is not None:
        ax.set_title(title)
    ax.set_ylabel("")
def plot_coeff(stations, yerr=None, ax=None):
    """Plot a bar plot of the intrinsique PO of the sources for all the station"""
    if ax is None:
        # The original called plt.subplots(row=..., columns=...) with invalid
        # keyword names and an undefined POtype_list (it would always raise);
        # fall back to a single axes, which matches how callers use it.
        f, ax = plt.subplots()
    c = sitesColor()
    cols = list()
    try:
        # Mapping-like input: one bar group per station.
        for s in stations:
            cols.append(c.loc["color"][s])
        stations.plot.bar(ax=ax, yerr=yerr, legend=False, color=cols, rot=30)
    except TypeError:
        # Single Station object (not iterable): plot its m/covm directly.
        cols.append(c.loc["color"][stations.name])
        stations.m.plot.bar(ax=ax, yerr=stations.covm, legend=False, color=cols)
def plot_ts_contribution_PO(station, POtype=None, saveDir=None):
    """
    Plot the time serie contribution of each source to the PO.

    station can be the name of the station or a Station object.
    If station is a string, then the saveDir variable must be the path to the
    directory where the file is saved.
    The file name must be in the format
        {station name}_contribution_{POtype}.csv
    """
    if isinstance(station, str):
        if saveDir is None:
            print("ERROR: the 'saveDir' argument must be completed")
            return
        print("Use the saved results")
        title = station
        fileName = saveDir + station + "_contribution_" + POtype + ".csv"
        df = pd.read_csv(fileName, index_col="date", parse_dates=["date"])
    else:
        df = station.CHEM * station.m
        title = station.name
    c = sourcesColor()
    # .loc replaces the DataFrame.ix indexer removed from pandas.
    cols = c.loc["color", df.columns].values
    df.plot(title=title, color=cols)
    plt.ylabel(POtype)
    return
def plot_ts_reconstruction_PO(station, POtype=None, POobs=None, saveDir=None, ax=None):
    """
    Plot a stacked barplot of for the sources contributions to the PO

    station can be a Station object or a station name (then saveDir and
    POobs are required and the contributions are read from CSV).
    """
    if ax is None:
        f, ax = plt.subplots(1, figsize=(10, 5))
    if isinstance(station, str):
        if saveDir is None or POobs is None:
            print("ERROR: the 'saveDir' and 'POobs' arguments must be completed")
            return
        title = station
        fileName = saveDir + station + "_contribution_" + POtype + ".csv"
        PO = POobs
        # Fix: POunc was never set in this branch (NameError at errorbar);
        # no uncertainties are available when loading from file.
        POunc = None
        df = pd.read_csv(fileName, index_col="date", parse_dates=["date"])
    else:
        df = station.CHEM * station.m
        PO = station.PO.values
        POunc = station.POunc.values
        title = station.name
    c = sourcesColor()
    # .loc replaces the DataFrame.ix indexer removed from pandas.
    cols = c.loc["color", df.columns].values
    # Date index (the dead `x.to_datetime()` call was removed).
    x = df.index
    # Width: set it to 1.5 when no overlapping, 1 otherwise.
    width = np.ones(len(x)) * 1.5
    deltal = x[1:] - x[:-1]
    deltal = deltal.append(pd.TimedeltaIndex([10, ], 'D'))
    deltar = pd.TimedeltaIndex([3], 'D')
    deltar = deltar.append(x[1:] - x[:-1])
    width[deltal < np.timedelta64(2, 'D')] = 1
    width[deltar < np.timedelta64(2, 'D')] = 1
    # Stacked bar plot: each source stacked on top of the previous ones.
    count = 0
    for i in range(df.shape[1]):
        bottom = df.iloc[:, 0:count].sum(axis=1)  # .iloc replaces .ix
        count += 1
        ax.bar(x, df[df.columns[i]],
               bottom=bottom,
               label=df.columns[i],
               width=width,
               # Fix: was c[df.columns[i]] (a 1-element Series), not a color.
               color=cols[i])
    # PO observation
    ax.errorbar(x, PO, POunc, fmt='ob', ecolor="black", elinewidth=1, markersize=3, label="OP obs.")
    # legend stuff
    ncol = int((len(df.columns) + 1) / 2)
    nrow = (len(df.columns) + 1) / ncol
    if nrow > 2:
        ncol += 1
    plt.legend(loc="center", ncol=ncol, bbox_to_anchor=(0.5, -0.16))
    plt.title(title)
    plt.ylabel(POtype)
    plt.subplots_adjust(top=0.90, bottom=0.20, left=0.10, right=0.90)
    return
def plot_seasonal_contribution(station, POtype=None, saveDir=None, **kwarg):
    """
    Plot a stacked bar plot of the normalized contribution of the source to
    the PO, grouped by season.
    """
    # first, check if station is a string
    # if so, then load the associated Station class (previously saved).
    if isinstance(station, str):
        with open(saveDir + "/" + station + "_" + POtype + ".pickle", "rb") as f:
            station = pickle.load(f)
    df = station.m * station.CHEM
    add_season(df)
    df_grouped = df.groupby("season").sum()
    ordered_season = ["DJF", "MAM", "JJA", "SON"]
    df_grouped = df_grouped.reindex(ordered_season)
    # selection the colors we have in the sources
    colors = sourcesColor()
    # .loc replaces the DataFrame.ix indexer removed from pandas.
    c = colors.loc["color", df_grouped.columns]
    # plot the stacked normalized bar plot
    axes = (df_grouped.T / df_grouped.sum(axis=1)).T.plot.bar(stacked=True,
                                                              rot=0,
                                                              color=c,
                                                              **kwarg)
    ax = plt.gca()
    ax.legend(loc="center", ncol=round(len(df_grouped.columns) / 2), bbox_to_anchor=(0.5, -0.2))
    ax.set_ylabel("PO contribution (normalized)")
    plt.title(station.name + " (DTTv)")
    plt.subplots_adjust(top=0.90, bottom=0.20, left=0.15, right=0.85)
def plot_seasonal_contribution_boxplot(station, POtype=None, saveDir=None, **kwarg):
    """
    Plot a boxplot contribution of the source to the PO per season.
    """
    # first, check if station is a string
    # if so, then load the associated Station class (previously saved).
    if isinstance(station, str):
        with open(saveDir + "/" + station + "_" + POtype + ".pickle", "rb") as f:
            station = pickle.load(f)
    df = station.m * station.CHEM
    add_season(df)
    # Order the seasons chronologically (winter first). The original tried
    # to fancy-index a string array with the season labels, which raises;
    # an ordered Categorical achieves the intended ordering instead.
    ordered_season = ["DJF", "MAM", "JJA", "SON"]
    df["season"] = pd.Categorical(df["season"], categories=ordered_season, ordered=True)
    # selection the colors we have in the sources; reindex mimics the lenient
    # .ix lookup (the "season" column simply gets no color).
    colors = sourcesColor()
    c = colors.loc["color"].reindex(df.columns)
    # plot the boxplot
    df_long = pd.melt(df, "season", var_name="source", value_name="PO")
    ax = sns.boxplot("season", y="PO", hue="source", data=df_long, palette=c)
    if "title" in kwarg:
        plt.title(kwarg["title"])
|
# -*- coding: utf-8 -*-
from .board import *
from .banner import *
from .post import *
def all():
    """Collect the combined ``__all__`` of the board, banner and post modules.

    NOTE: this intentionally shadows the builtin ``all`` at module level;
    the name is kept for backward compatibility.
    """
    names = []
    for module in (board, banner, post):
        names.extend(module.__all__)
    return names
__all__ = all()
|
import random
import copy
import wx
import numpy
from keras import backend
from keras.models import Model
from keras.layers import Dense, Activation, Input
from main import DATASETS_NUM, DEFAULT_MODEL, INPUTS
from main import TRAIN_DATASETS_FORM, DATASETS_FORM, MODE
from main import OPTIMIZERS_LIST, DATASETS_LIST, ACTIVATION_FUNC_LIST
from main import outputPanelSize, outputPanelRange, outputCanvasDensity
import callback
import frame
import app
class MainModel(Model):
    # Shared Keras/TensorFlow session; its graph is reused by all instances.
    session = backend.get_session()
    ml_callback = callback.Callback()
    # Static property
    # Class-level (shared) configuration; model_stru is deep-copied so later
    # edits don't mutate DEFAULT_MODEL.
    model_stru = copy.deepcopy(DEFAULT_MODEL)
    learning_rate = 3e-2
    # Fraction of DATASETS_NUM reserved for the test split.
    test_datasets_ratio = 0.2
    is_training = False
    def __init__(self, main_app):
        """Set up canvas sampling grids, random datasets and the Keras model.

        :param main_app: the owning application object (provides UI sizes
            such as output_neuron_density).
        """
        # Reuse the shared session's graph for all model operations.
        self.graph = self.session.graph
        self.app = main_app
        self.choosed_pattern = "Circle"
        self.choosed_optimizer = "Adam"
        # Map every output-canvas pixel (row-major) to model input
        # coordinates within outputPanelRange.
        canvas_pos4canvas = []
        for j in range(outputCanvasDensity):
            for i in range(outputCanvasDensity):
                x_for_ml = numpy.interp(
                    i, [0, outputCanvasDensity - 1], outputPanelRange)
                # The y axis is flipped: canvas j grows downward.
                y_for_ml = numpy.interp(
                    j, [0, outputCanvasDensity - 1],
                    [outputPanelRange[1], outputPanelRange[0]])
                canvas_pos4canvas.append([x_for_ml, y_for_ml])
        self.canvas_pos4canvas = canvas_pos4canvas
        # Same mapping at the (coarser) per-neuron preview resolution.
        canvas_pos4neuron = []
        for j in range(self.app.output_neuron_density):
            for i in range(self.app.output_neuron_density):
                x_for_ml = numpy.interp(
                    i, [0, self.app.output_neuron_density - 1],
                    outputPanelRange)
                y_for_ml = numpy.interp(
                    j, [0, self.app.output_neuron_density - 1],
                    [outputPanelRange[1], outputPanelRange[0]])
                canvas_pos4neuron.append([x_for_ml, y_for_ml])
        self.canvas_pos4neuron = canvas_pos4neuron
        self.preparePredictDataForCanvas()
        self.randomDatasets()
        self.reCreateModel()
def preparePredictDataForCanvas(self):
self.predict_data4canvas = numpy.array(
list(map(self.getInputsForm, self.canvas_pos4canvas)))
self.predict_data4neuron = numpy.array(
list(map(self.getInputsForm, self.canvas_pos4neuron)))
    def reCreateModel(self):
        """(Re)build the Keras graph from the static model_stru description
        and compile it with the currently chosen optimizer; also build one
        sub-model per hidden layer for activation previews.
        """
        inputs = Input(shape=(len(MainModel.model_stru["inputs"]),))
        h = inputs
        for hidden in MainModel.model_stru["hiddens"]:
            h = Dense(hidden["units"])(h)
            act_func = hidden["activation"]
            if(callable(act_func)):
                h = Activation(act_func)(h)
            elif(act_func in ACTIVATION_FUNC_LIST):
                h = Activation(ACTIVATION_FUNC_LIST[act_func])(h)
        if(MODE == 0 or MODE == 1):
            outputs = Dense(1)(h)
        if(MODE == 2):
            outputs = Dense(2)(h)  # (output range: [0 -> 1, 0 -> 1])
        act_func = MainModel.model_stru["outputs"]["activation"]
        if(callable(act_func)):
            outputs = Activation(act_func)(outputs)
        elif(act_func in ACTIVATION_FUNC_LIST):
            outputs = Activation(ACTIVATION_FUNC_LIST[act_func])(outputs)
        super(MainModel, self).__init__(inputs=inputs, outputs=outputs)
        self.compile(
            optimizer=OPTIMIZERS_LIST[self.choosed_optimizer](
                lr=MainModel.learning_rate),
            loss='mean_squared_error',
            metrics=['accuracy'])
        # Layer indexing assumes each hidden entry contributed a
        # [Dense, Activation] pair, putting activation outputs at indices
        # 2, 4, 6, ... (index 0 is the Input).
        # NOTE(review): an activation outside ACTIVATION_FUNC_LIST would
        # break this stride — confirm model_stru always uses known ones.
        index = 2
        self.hiddens_layer_model = []
        for i in range(len(MainModel.model_stru["hiddens"])):
            hidden_layer_model = Model(
                inputs=self.input, outputs=self.layers[index].output)
            self.hiddens_layer_model.append(hidden_layer_model)
            index += 2
        self.initLoss()
    def initLoss(self):
        """Evaluate current train/validation loss once and publish the values
        into the app-level logs dict.
        """
        val_loss = 0
        loss = 0
        check = True
        if(app.MainApp.custom_data):
            # With user-drawn data the train set may be empty; skip
            # evaluation entirely in that case.
            train_num = len(MainModel.datasets4train["X_train"])
            if(train_num <= 0):
                check = False
            else:
                val_loss, val_acc = self.evaluate(
                    MainModel.datasets4train["X_test"],
                    MainModel.datasets4train["Y_test"], verbose=0)
        # NOTE(review): when custom_data is False the validation set is never
        # evaluated (val_loss stays 0) — confirm this is intended.
        if(check):
            loss, acc = self.evaluate(
                MainModel.datasets4train["X_train"],
                MainModel.datasets4train["Y_train"], verbose=0)
        app.MainApp.logs["val_loss"] = val_loss
        app.MainApp.logs["loss"] = loss
    def randomDatasets(self):
        """Generate DATASETS_NUM random samples for the chosen pattern, split
        them into train/test, and mirror display copies into the UI frame.
        """
        frame.MainFrame.display_datas = copy.deepcopy(DATASETS_FORM)
        datasets_ = copy.deepcopy(DATASETS_FORM)
        test_num = int(DATASETS_NUM * MainModel.test_datasets_ratio)
        # args is the mutable parameter block passed to the pattern function:
        # [random int 0-2, 0, noise scale, sample index, split suffix,
        #  samples in split] — exact contract defined by DATASETS_LIST
        # entries; TODO confirm.
        args = [
            random.randint(0, 2), 0, app.MainApp.noise * 0.1, 0, "", 0]
        train_i = 0
        test_i = 0
        for i in range(DATASETS_NUM):
            # The first test_num indices are assigned to the test split.
            if(i > test_num):
                dataset_suffix = "train"
                max_num = abs(DATASETS_NUM - test_num)
                current_i = train_i
                train_i += 1
            else:
                dataset_suffix = "test"
                max_num = test_num
                current_i = test_i
                test_i += 1
            args[3] = current_i
            args[4] = dataset_suffix
            args[5] = max_num
            isPass = False
            # Rejection-sample until the pattern yields an in-range point.
            while(not isPass):
                x_rand = random.uniform(
                    outputPanelRange[0], outputPanelRange[1])
                y_rand = random.uniform(
                    outputPanelRange[0], outputPanelRange[1])
                apply_to_pattern = DATASETS_LIST[self.choosed_pattern](
                    x_rand, y_rand, args)
                if(apply_to_pattern is not False):
                    new_x, new_y, color = apply_to_pattern
                    if(
                        new_x > outputPanelRange[1]
                        or new_x < outputPanelRange[0]
                    ):
                        continue
                    if(
                        new_y > outputPanelRange[1]
                        or new_y < outputPanelRange[0]
                    ):
                        continue
                    y_train = color
                    if(isinstance(color, float)):
                        if(MODE == 2):
                            # Split a signed scalar into two channels.
                            if(color > 0):
                                y_train = [color, 0]
                            else:
                                y_train = [0, -color]
                    else:
                        if(MODE == 2):
                            y_train = [1, 0] if(color == 1) else [0, 1]  # (output range: [0 -> 1, 0 -> 1])
                    datasets_[dataset_suffix].append([[new_x, new_y], y_train])
                    # Map model coordinates back to panel pixels for display.
                    x_map = numpy.interp(
                        new_x, outputPanelRange, [0, outputPanelSize - 1])
                    y_map = numpy.interp(
                        new_y, outputPanelRange, [outputPanelSize - 1, 0])
                    frame.MainFrame.display_datas[dataset_suffix].append(
                        [x_map, y_map, color])
                    isPass = True
        MainModel.datasets = datasets_
        self.updateDatasets()
def updateDatasets(self):
    """Rebuild MainModel.datasets4train from the raw sample store.

    Converts every [[x, y], color] pair in MainModel.datasets into the
    configured input-feature vector and a MODE-dependent target, then
    materializes the four train/test arrays as numpy arrays.
    """
    converted = copy.deepcopy(TRAIN_DATASETS_FORM)
    for split_name, samples in MainModel.datasets.items():
        for (sample_x, sample_y), color in samples:
            converted["X_" + split_name].append(
                self.getInputsForm([sample_x, sample_y]))
            target = color  # (output range: [0 -> 1, 0 -> 1])
            if(MODE == 0):
                target = [color]  # (output range: -1 -> 1)
            if(MODE == 1):
                target = numpy.interp(color, [-1, 1], [0, 1])  # (output range: 0 -> 1)
            converted["Y_" + split_name].append(target)
    for array_key in ("X_train", "Y_train", "X_test", "Y_test"):
        converted[array_key] = numpy.array(converted[array_key])
    MainModel.datasets4train = converted
def predictToOutputCanvas(self, newpredict=True):
    """Render predictions onto the main output panel.

    If the mouse hovers a neuron panel, show that single neuron's
    activation map instead of the network output.

    :param newpredict: when False (and no neuron is hovered), reuse the
        cached self.current_predict_datas instead of re-predicting.
    """
    # NOTE: "neruon" is a typo, but it is the runtime attribute name.
    neruon_pos = self.app.main_frame.neruonPanelMouseHoveredPos
    if(neruon_pos is not None):
        # (i, j) = (hidden layer index, neuron index within that layer).
        i, j = neruon_pos
        predict_neroun = self.hiddens_layer_model[i].predict_on_batch(
            self.predict_data4canvas).T
        self.current_predict_datas = predict_neroun[j]
    else:
        if(newpredict):
            self.current_predict_datas = self.predict_on_batch(
                self.predict_data4canvas)
    map_predict_datas_to_rgba = list(map(
        self.app.mapOutputToRGBA, self.current_predict_datas))
    # Hand the pixel update to the wx main thread.
    wx.CallAfter(
        self.app.main_frame.output_panel.updateOutputCanvasImage,
        map_predict_datas_to_rgba)
def predictToNeuronsCanvas(self, newpredict=True):
    """Render each hidden neuron's activation map onto its panel.

    :param newpredict: when False, reuse the cached
        self.current_neuron_predict_datas instead of re-predicting.
    """
    if(newpredict):
        # One activation row per neuron, flattened across all hidden layers.
        self.current_neuron_predict_datas = [
            neuron_row
            for layer_model in self.hiddens_layer_model
            for neuron_row in layer_model.predict_on_batch(
                self.predict_data4neuron).T
        ]
    rgba_images = list(map(
        self.app.mapOutputListToRGBAList,
        self.current_neuron_predict_datas))
    # Panel order matches the flattened neuron order built above.
    for panel_index, panel in enumerate(self.app.main_frame.neruon_panels):
        wx.CallAfter(panel.updateOutputCanvasImage,
                     rgba_images[panel_index])
# Utils
def getInputsForm(self, pos):
    """Build the network input vector for a 2-D point.

    Each feature name configured in MainModel.model_stru["inputs"] maps
    to a callable in INPUTS that derives one feature from (x, y).

    :param pos: two-element sequence (x, y).
    :return: list of feature values, one per configured input.
    """
    x, y = pos[0], pos[1]
    # Comprehension replaces the original append loop; the loop variable
    # previously shadowed the builtin `input`.
    return [INPUTS[feature](x, y)
            for feature in MainModel.model_stru["inputs"]]
def stopTraining(self):
    """Request that training stop; presumably the training loop polls
    MainModel.is_training between steps — confirm against the loop."""
    MainModel.is_training = False
|
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from model.architectures.transformations import spatial_dropout, compose_transformation, \
history_crop, add_normal_bias, history_cutout
class TransformationTest(parameterized.TestCase, tf.test.TestCase):
    """Unit tests for the sequence data-augmentation transformations."""

    def assert_not_nan(self, tensor):
        # Helper: fail if any element of `tensor` is NaN.
        return self.assertTrue(np.all(~tf.math.is_nan(tensor)))

    @parameterized.named_parameters(('spatial_do_10', 0.1),
                                    ('spatial_do_50', 0.5))
    def test_spatial_dropout(self, rate):
        tf_generator = tf.random.Generator.from_seed(1234)
        batch = tf.ones((2, 48, 41))
        dropped_out = spatial_dropout(batch, tf_generator, rate)
        # Masks must differ between the two batch elements.
        self.assertNotAllEqual(dropped_out[0], dropped_out[1])
        # Every time step has the same feature-sum, i.e. the same features
        # were dropped for all time steps ("spatial" dropout).
        self.assertEqual(len(np.unique(tf.reduce_sum(dropped_out[0], axis=1))), 1)
        # Empirical drop fraction over 300 draws should match `rate`.
        count = 0
        for k in range(300):
            dropped_out = spatial_dropout(batch, tf_generator, rate)
            count += tf.reduce_sum(dropped_out) / (2 * 48 * 41)
        count /= 300
        self.assertAlmostEqual(1 - count, rate, delta=1e-2)

    @parameterized.named_parameters(('low_hist_high_prob', 0.1, 1.0),
                                    ('mid_hist_high_prob', 0.5, 1.0),
                                    ('mid_high_prob', 0.5, 0.5))
    def test_history_crop(self, min_history, proba):
        tf_generator = tf.random.Generator.from_seed(1234)
        batch = tf.ones((1, 1000, 40))
        cropped_seq = 0
        for k in range(1000):
            seq = history_crop(batch, tf_generator, p=proba, min_history=min_history)
            if np.any(seq.numpy() == 0):
                cropped_seq += 1
            # The most recent min_history fraction must never be cropped.
            self.assertTrue(np.all(seq[0, -int(1000 * min_history):] == 1))
        # Crop frequency over 1000 draws should match the probability p.
        self.assertAlmostEqual(cropped_seq / 1000, proba, delta=1e-1)

    @parameterized.named_parameters(('noise_1', 0.10),
                                    ('noise_5', 0.5))
    def test_add_bias(self, std):
        tf_generator = tf.random.Generator.from_seed(1234)
        batch = tf.ones((1, 48, 41))
        noised_seq = add_normal_bias(batch, tf_generator, std)
        noise = noised_seq - batch
        # NOTE(review): with batch size 1, indices 0 and -1 address the same
        # element, so this assertion is vacuous — confirm intent.
        self.assertAllEqual(noise[0], noise[-1])
        # An all-zero input stays all-zero, i.e. the bias scales with input.
        batch_zeros = tf.zeros((50, 48, 41))
        noised_seq = add_normal_bias(batch_zeros, tf_generator, std)
        self.assertAllEqual(noised_seq, batch_zeros)

    @parameterized.named_parameters(('100', 100),
                                    ('200', 200))
    def test_history_cutout(self, size):
        tf_generator = tf.random.Generator.from_seed(1234)
        batch = tf.ones((1, 1000, 10))
        cutout_seq = history_cutout(batch, tf_generator, size)
        # Exactly `size` time steps (all 10 features) must be zeroed.
        self.assertEqual(tf.reduce_sum(cutout_seq), (1000 - size) * 10)

    @parameterized.named_parameters(('compose_spatial_temp_drop', [spatial_dropout, history_cutout]))
    def test_composition(self, transformations):
        # Composition must preserve the input shape.
        batch = tf.ones((2, 48, 41))
        tf_generator = tf.random.Generator.from_seed(1234)
        composed = compose_transformation(batch, tf_generator, transformations)
        self.assertEqual(batch.shape, composed.shape)
# Delegate to the TensorFlow test runner (collects the parameterized cases).
if __name__ == '__main__':
    tf.test.main()
|
import argparse
import torch
import torch.nn as nn
from torch.backends import cudnn
from torch.utils import data
import numpy as np
import time
import os, sys
sys.path.append("..")
import datetime
import cv2
from src.networks import UNet_inpainter, Accumulate_LSTM_no_loss
from src.data import Fusion_dataset_smpl_test
from options import get_general_options
from src.utils import Logger, TransferTexture
from src.crn_model import CRN, CRN_small, CRN_smaller
from src.flow_net import Propagation3DFlowNet
from src.cal_flow import float_estimate
def xavier_init(m):
    """Module initializer for use with ``Module.apply``.

    Any layer whose class name contains 'Conv' gets Xavier-normal
    weights; any 'BatchNorm' layer gets weight=1 and bias=0. Other
    modules are left untouched.
    """
    layer_name = type(m).__name__
    if 'Conv' in layer_name:
        nn.init.xavier_normal_(m.weight)
    elif 'BatchNorm' in layer_name:
        nn.init.constant_(m.weight, 1)
        nn.init.constant_(m.bias, 0)
def flip(x, dim):
    """Return a copy of ``x`` reversed along dimension ``dim``.

    Replaces the original hand-rolled advanced-indexing reversal with
    ``torch.flip``, which is equivalent (returns a new tensor) and
    handles negative ``dim`` natively.

    :param x: input tensor.
    :param dim: dimension to reverse; may be negative.
    :return: tensor of the same shape with ``dim`` reversed.
    """
    return torch.flip(x, dims=[dim])
def texture_warp_pytorch(tex_parts, IUV, device):
    """Warp 24 DensePose part textures into image space.

    :param tex_parts: sequence of 24 part textures indexed by DensePose
        part id - 1; each is sampled via grid_sample, so presumably
        (3, H_tex, W_tex)-shaped tensors — TODO confirm.
    :param IUV: numpy array (H, W, 3): channel 0 = part index (1..24,
        0 = background), channels 1/2 = U/V coordinates in [0, 255].
    :param device: device for intermediate tensors; note the chained
        .cuda() calls force CUDA regardless of this argument.
    :return: (3, H, W) tensor where every body pixel is sampled from its
        part texture; background pixels stay zero.
    """
    IUV = torch.from_numpy(IUV).to(device).cuda()
    U = IUV[:, :, 1]
    V = IUV[:, :, 2]
    # Output accumulator shaped (1, 3, H, W) to match grid_sample output.
    generated_image = torch.zeros(IUV.size(), device=device).unsqueeze(0).permute(0, 3, 1, 2).cuda()
    for PartInd in range(1, 25):  ## Set to range(1, 23) to ignore the face part.
        tex = tex_parts[PartInd - 1]  # get texture for each part.
        # U/V of the pixels that belong to this specific part; 0 elsewhere.
        u_current_points = torch.where(IUV[:, :, 0] == PartInd, U.float().cuda(),
                                       torch.zeros(U.size()).cuda())
        v_current_points = torch.where(IUV[:, :, 0] == PartInd, V.float().cuda(), torch.zeros(V.size()).cuda())
        # Normalize texture coordinates from [0, 255] to grid_sample's
        # [-1, 1] range; V is flipped (255 - v) before normalizing.
        x = ((255 - v_current_points) / 255. - 0.5) * 2
        y = (u_current_points / 255. - 0.5) * 2
        grid = torch.cat([x.unsqueeze(2), y.unsqueeze(2)], dim=2).unsqueeze(0).to(device).cuda()  # 1, H, W, 2
        tex_image = tex.unsqueeze(0).float().to(device).cuda()  # 1, 3, H, W
        sampled_patch = torch.nn.functional.grid_sample(tex_image, grid, mode='bilinear').cuda()
        # Keep the sampled pixels only where this part's mask is set.
        generated_image = torch.where(IUV[:, :, 0] == PartInd, sampled_patch.cuda(), generated_image.cuda())
    return generated_image.squeeze()
def train(args):
    """Run inference for the fusion video-synthesis pipeline.

    Despite the name, no training happens here: five pretrained
    sub-networks (texture-accumulation LSTM, texture inpainter,
    background CRN, foreground refiner, flow-based propagation net) are
    restored from iteration-42000 checkpoints and evaluated over the
    test dataset; generated frames plus coarse/mask/tsf debug images
    are written under opt["test_save_dir"]/<exp_name>/<video>.

    :param args: argparse namespace with `exp_name` (experiment name,
        selects checkpoint/result directories) and `num_frame`
        (number of reference frames, 1..5).
    """
    model_name = args.exp_name
    num_frames = args.num_frame
    start = time.time()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # set this to prevent matplotlib import error
    os.environ['QT_QPA_PLATFORM'] = 'offscreen'
    opt = get_general_options()
    opt['batch_size'] = 1
    ckpt_dir = os.path.join(opt['model_save_dir'], model_name)
    opt['network_dir'] = ckpt_dir
    opt["num_target"] = 1
    # NOTE(review): both branches are identical, so this conditional is
    # redundant — maximum_ref_frames always equals num_frames.
    if num_frames > 3:
        opt["maximum_ref_frames"] = num_frames
    else:
        opt["maximum_ref_frames"] = num_frames
    result_dir = os.path.join(opt["test_save_dir"], model_name)
    # accelerate forwarding
    cudnn.benchmark = True
    num_workers = 2
    # train_data = PatchTransferDataset(opt, mode='train')
    #
    # test_data = PatchTransferDataset(opt, mode='test')
    # test_data_loader = data.DataLoader(dataset=test_data, batch_size=1, num_workers=1,
    #                                    pin_memory=True).__iter__()
    # Models: each sub-network is restored from its iteration-42000 checkpoint
    # and wrapped in DataParallel.
    Accu_model = Accumulate_LSTM_no_loss()
    Accu_model_dir = os.path.join(opt['model_save_dir'], model_name)
    Accu_model_weight_dir = os.path.join(Accu_model_dir, "Accu_iter_42000.pth")
    Accu_model.load_state_dict(torch.load(Accu_model_weight_dir))
    Accu_model = nn.DataParallel(Accu_model).to(device)
    inpaint_model = UNet_inpainter()  # input to this is 7*256*256, both for input and mask
    inpaint_model_dir = os.path.join(opt['model_save_dir'], model_name)
    inpaint_model_weight_dir = os.path.join(inpaint_model_dir, "inpaint_iter_42000.pth")
    inpaint_model.load_state_dict(torch.load(inpaint_model_weight_dir))
    inpaint_model = nn.DataParallel(inpaint_model).to(device)
    print("load smaller model")
    bg_model = CRN_smaller(3)
    bg_model_dir = os.path.join(opt['model_save_dir'], model_name)
    bg_model_weight_dir = os.path.join(bg_model_dir, "bg_iter_42000.pth")
    bg_model.load_state_dict(torch.load(bg_model_weight_dir))
    bg_model = nn.DataParallel(bg_model).to(device)
    refine_model = CRN_smaller(3, fg=True)
    refine_model_dir = os.path.join(opt['model_save_dir'], model_name)
    refine_model_weight_dir = os.path.join(refine_model_dir, "refine_iter_42000.pth")
    refine_model.load_state_dict(torch.load(refine_model_weight_dir))
    refine_model = nn.DataParallel(refine_model).to(device)
    propagater = Propagation3DFlowNet(9, 32, 2, 3, use_deconv=False)
    propagater_dir = os.path.join(opt['model_save_dir'], model_name)
    propagater_weight_dir = os.path.join(propagater_dir, "pro_iter_42000.pth")
    propagater.load_state_dict(torch.load(propagater_weight_dir))
    propagater = nn.DataParallel(propagater).to(device)
    flow_calculator = float_estimate()
    flow_calculator = nn.DataParallel(flow_calculator).to(device)
    print("preparation cost %f seconds" % (time.time() - start))
    start_t = time.time()
    # NOTE(review): propagater and flow_calculator are never switched to
    # eval mode — confirm that is intentional.
    Accu_model.eval()
    inpaint_model.eval()
    bg_model.eval()
    refine_model.eval()
    n_epoch = 200  # unused here (inference makes a single pass)
    # import pdb; pdb.set_trace()
    data_start = time.time()
    count = 0
    train_data = Fusion_dataset_smpl_test(opt, mode='test')
    train_data_loader = data.DataLoader(dataset=train_data, batch_size=opt['batch_size'], shuffle=False,
                                        num_workers=num_workers, pin_memory=True)
    with torch.no_grad():
        for batch_id, (src_data, tgt_data, data_255, smpl_data, vid_name, img_name_list, chosen_frame) in enumerate(
                train_data_loader):
            count = count + 1
            data_t = time.time() - data_start  # data-loading time (currently unused)
            vid_name = vid_name[0]
            # need src_texture_im, src_IUV, tgt_IUV, real, tgt_IUV255
            src_img, src_IUV, src_texture_im, src_mask_im, src_common_area, src_mask_in_image = src_data
            src_common_area = src_common_area.float().to(device)
            # src_common_area=src_common_area.unsqueeze(1).repeat(1,3,1,1)
            # Move image-like tensors to (B, T, C, H, W) float on device.
            src_mask_in_image = src_mask_in_image.permute(0, 1, 4, 2, 3).float().to(device)
            src_img = src_img.permute(0, 1, 4, 2, 3).float().to(device)
            src_IUV = src_IUV.permute(0, 1, 4, 2, 3).float().to(device)
            src_texture_im = src_texture_im.permute(0, 1, 4, 2, 3).float().to(device)
            src_mask_im = src_mask_im.float().to(device)
            tgt_img, tgt_IUV = tgt_data
            tgt_img = tgt_img.permute(0, 1, 4, 2, 3).float().to(device)
            tgt_IUV = tgt_IUV.permute(0, 1, 4, 2, 3).float().to(device)
            # tgt_mask_in_image=tgt_mask_in_image.permute(0,1,4,2,3).float().to(device)
            # tgt_texture_im=tgt_texture_im.permute(0,1,4,2,3).float().to(device)
            # tgt_mask_im=tgt_mask_im.float().to(device)
            # Background = first-frame image outside the person mask; the
            # masked region is filled with Gaussian noise before inpainting.
            bg_mask = (1 - src_mask_in_image[:, 0].squeeze(1))
            bg_incomplete = bg_mask * src_img[:, 0].squeeze(1) + (1 - bg_mask) * torch.randn(bg_mask.shape).cuda()
            src_IUV255 = data_255[0][:, 0].squeeze()
            tgt_IUV255 = data_255[1].squeeze(1)
            src_IUV255 = src_IUV255.numpy()
            tgt_IUV255 = tgt_IUV255.numpy()
            smpl_seq, smpl_real_mask, smpl_vertices = smpl_data
            smpl_real_mask = smpl_real_mask.permute(0, 1, 4, 2, 3).float().to(device)
            # prev_real_img=prev_real_img.permute(0,3,1,2).float().to(device)
            smpl_vertices = smpl_vertices.float().to(device)
            smpl_seq = torch.tensor(smpl_seq).float().cuda()
            # print("texture_map's shape is:",src_texture_im.shape)
            # print("tgt_IUV255's shape is",tgt_IUV255.shape)
            # Select the first num_frames reference frames (1..5).
            if num_frames == 1:
                random_index = np.array([0])
            if num_frames == 2:
                random_index = np.array([0, 1])
            if num_frames == 3:
                random_index = np.array([0, 1, 2])
            if num_frames == 4:
                random_index = np.array([0, 1, 2, 3])
            if num_frames == 5:
                random_index = np.array([0, 1, 2, 3, 4])
            # Cut each reference texture image into a 4x6 grid of 200x200
            # patches; the LSTM accumulates each patch across frames.
            src_texture_im_input = []
            for i in range(4):
                for j in range(6):
                    concat_image = []
                    for z in range(random_index.shape[0]):
                        concat_image.append(
                            src_texture_im[:, random_index[z], :, i * 200:(i + 1) * 200, j * 200:(j + 1) * 200].squeeze(
                                1))
                    src_texture_im_input.append(concat_image)
            Accu_output_texture = Accu_model(src_texture_im_input)
            random_number_list = []
            for i in range(random_index.shape[0]):
                random_number_list.append(random_index[i])
            # Zero out masks of reference frames that were not selected.
            if num_frames < opt["maximum_ref_frames"]:
                for i in range(opt["maximum_ref_frames"]):
                    if i not in random_number_list:
                        src_mask_im[:, i] = src_mask_im[:, i] * 0
            # Union of the selected frames' texture-space masks.
            src_common_area = (src_common_area * 0).byte()
            for i in range(opt["maximum_ref_frames"]):
                src_common_area = src_common_area | src_mask_im[:, i].byte()
            src_common_area = src_common_area.float()
            src_common_area = src_common_area.unsqueeze(1).repeat(1, 3, 1, 1)
            # Restrict the accumulated texture patches to the common area.
            for i in range(4):
                for j in range(6):
                    common_area = src_common_area[:, :, i * 200:(i + 1) * 200, j * 200:(j + 1) * 200]
                    Accu_output_texture[i * 6 + j] = Accu_output_texture[i * 6 + j] * common_area
            inpaint_texture = inpaint_model(Accu_output_texture)
            save_video_dir = os.path.join(result_dir, vid_name)
            if os.path.exists(save_video_dir) == False:
                os.makedirs(save_video_dir)
            bg_output = bg_model(bg_incomplete, 256)
            first_frame = tgt_img[:, 0]
            # print(first_frame.shape)
            # vis_image=((first_frame.squeeze(0).permute(1,2,0).detach().cpu().numpy()/2+0.5)*255).astype(np.uint8)
            # img_save_dir=os.path.join(save_video_dir,img_name_list[0][0])
            # cv2.imwrite(img_save_dir,vis_image)
            # prev_image=src_img[:,chosen_frame.shape[1]-1]
            for i in range(tgt_IUV255.shape[1]):
                # Propagate from the temporally closest chosen reference frame.
                distance = np.abs(i - chosen_frame)
                src_pro = np.argmin(distance)
                prev_image = src_img[:, src_pro]
                inpaint_warp = torch.full((1, 3, 256, 256), 0, device=device)
                inpaint_texture_list = list(map(lambda inp: inp[0], inpaint_texture))
                inpaint_warp[0] = texture_warp_pytorch(inpaint_texture_list, tgt_IUV255[0, i], device)
                refine_output, fg_mask = refine_model(inpaint_warp[:], 256)
                # Blend refined foreground and generated background by the soft mask.
                fusion_output = refine_output * fg_mask.repeat(1, 3, 1, 1) + bg_output * (
                        1 - fg_mask.repeat(1, 3, 1, 1))
                pro_index = np.clip(chosen_frame[0, src_pro], 0, 30)
                prev_smpl = [smpl_seq[:, pro_index, 0:3], smpl_seq[:, pro_index, 3:75], smpl_vertices[:, pro_index],
                             smpl_seq[:, pro_index, 75:85]]
                tgt_smpl = [smpl_seq[:, i, 0:3], smpl_seq[:, i, 3:75], smpl_vertices[:, i], smpl_seq[:, i, 75:85]]
                tsf_image = flow_calculator(prev_image, prev_smpl, tgt_smpl)
                flow_pro_input = {'fake_tgt': fusion_output, 'tsf_image': tsf_image, 'use_mask': True,
                                  'tgt_smpl_mask': smpl_real_mask[:, i], 'use_IUV': True, 'tgt_IUV': tgt_IUV[:, i]}
                pro_output = propagater(flow_pro_input)
                final_output = pro_output['pred_target']
                mask = pro_output["weight"]
                # prev_image=final_output
                # Convert tensors from [-1, 1] to uint8 [0, 255] images for cv2.imwrite.
                coarse_image = np.clip(
                    (fusion_output[0].squeeze(0).permute(1, 2, 0).detach().cpu().numpy() / 2 + 0.5) * 255, 0,
                    255).astype(np.uint8)
                vis_image = np.clip(
                    (final_output[0].squeeze(0).permute(1, 2, 0).detach().cpu().numpy() / 2 + 0.5) * 255, 0,
                    255).astype(np.uint8)
                mask_image = np.clip(mask[0].squeeze(0).cpu().numpy() * 255, 0, 255).astype(np.uint8)
                vis_tsf_image = np.clip(
                    (tsf_image[0].squeeze(0).permute(1, 2, 0).detach().cpu().numpy() / 2 + 0.5) * 255, 0, 255).astype(
                    np.uint8)
                # print(mask_image.shape)
                # bg_image=np.clip((bg_incomplete[0].squeeze(0).permute(1,2,0).detach().cpu().numpy()/2+0.5)*255,0,255).astype(np.uint8)
                coarse_image_dir = os.path.join(save_video_dir, "coarse_" + img_name_list[i][0])
                img_save_dir = os.path.join(save_video_dir, img_name_list[i][0])
                mask_save_dir = os.path.join(save_video_dir, "mask_" + img_name_list[i][0])
                tsf_save_dir = os.path.join(save_video_dir, "tsf_" + img_name_list[i][0])
                # bg_save_dir=os.path.join(save_video_dir,"bg_%d.jpg"%(i))
                cv2.imwrite(mask_save_dir, mask_image)
                cv2.imwrite(img_save_dir, vis_image)
                cv2.imwrite(coarse_image_dir, coarse_image)
                cv2.imwrite(tsf_save_dir, vis_tsf_image)
                # cv2.imwrite(bg_save_dir,bg_image)
                print("writing to ", img_save_dir)
                # print(inpaint_output.shape)
            data_start = time.time()
    print("Testing Done.")
if __name__ == '__main__':
    # Only needed when run as a script: pretty colored tracebacks.
    import colored_traceback
    parser = argparse.ArgumentParser()
    parser.add_argument('--exp_name', '-e', type=str, required=True, help='experiment name')
    parser.add_argument('--num_frame', '-n', type=int, required=True, help='number of input reference frame')
    parser.add_argument('--gpu', type=str, required=True, help='specify gpu devices')
    parser.add_argument('--debug', action='store_true', help='specify debug mode')
    parser.add_argument('--init', type=str, help='weight init method')
    args = parser.parse_args()
    # Restrict visible GPUs before any CUDA context is created.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    colored_traceback.add_hook()
    train(args)
|
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, \
RidgeRegression
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
    """
    Simulate data from a polynomial model and use cross-validation to select the best fitting degree
    Parameters
    ----------
    n_samples: int, default=100
        Number of samples to generate
    noise: float, default = 5
        Noise level to simulate in responses
    """
    # Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
    # and split into training- and testing portions
    f = lambda x: (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)
    noise_vector = np.random.normal(0, noise, n_samples)
    X = np.linspace(-1.2, 2, n_samples)
    y = f(X) + noise_vector
    noiseless_y = f(X)
    # NOTE(review): test_size=2/3 keeps only one third of the samples for
    # training; confirm the exercise did not intend a 2/3 *train* split.
    train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=2 / 3)
    fig = go.Figure([go.Scatter(x=X, y=noiseless_y, mode="lines+markers",
                                name="True model"),
                     go.Scatter(x=train_X, y=train_y,
                                mode="markers",
                                name="Train samples"),
                     go.Scatter(x=test_X, y=test_y, mode="markers",
                                name="Test samples")], layout=go.Layout(
        title=rf"$\text{{True polynomial and noisy samples. noise = {noise},"
              rf" m = {n_samples} samples}}$"))
    fig.show()
    # Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10
    # errors[d] holds (train_error, validation_error) for degree d.
    errors = np.ndarray([11, 2], float)
    degree_range = range(11)
    for degree in degree_range:
        errors[degree] = cross_validate(PolynomialFitting(degree), train_X,
                                        train_y, mean_square_error)
    # Degree with the lowest mean validation error.
    minimizer = np.argmin(errors[:, 1])
    fig = go.Figure(
        [go.Scatter(x=list(degree_range), y=errors[:, 0], mode="lines+markers",
                    name="train error"),
         go.Scatter(x=list(degree_range), y=errors[:, 1], mode="lines+markers",
                    name="validation error"),
         go.Scatter(x=[minimizer], y=[errors[minimizer, 1]], mode="markers",
                    name="validation error minimizer")
         ], layout=go.Layout(
            title=rf"$\text{{Mean Train and Validation Errors Using 5-fold"
                  rf" Cross Validation. noise = {noise}, m = {n_samples} "
                  rf"samples}}$"))
    fig.show()
    # Question 3 - Using best value of k, fit a k-degree polynomial model and report test error
    model = PolynomialFitting(int(minimizer)).fit(train_X, train_y)
    print(f"with noise {noise} and {n_samples} samples, k* was {minimizer} and"
          f" got a {model.loss(test_X, test_y)} test error.")
def select_regularization_parameter(n_samples: int = 50,
                                    n_evaluations: int = 500):
    """
    Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter
    values for Ridge and Lasso regressions
    Parameters
    ----------
    n_samples: int, default=50
        Number of samples to generate
    n_evaluations: int, default = 500
        Number of regularization parameter values to evaluate for each of the algorithms
    """
    # Question 6 - Load diabetes dataset and split into training and testing portions
    X, y = datasets.load_diabetes(return_X_y=True)
    # NOTE(review): test_size=n_samples puts n_samples in the *test* set;
    # confirm the exercise did not intend n_samples *training* samples.
    train_X, test_X, train_y, test_y = train_test_split(X, y,
                                                        test_size=n_samples)
    # minimizers[0] / minimizers[1]: best lambda found for Ridge / Lasso.
    minimizers = [1, 1]
    # Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions
    # (loop bounds renamed lam_min/lam_max so the builtins min/max are not shadowed)
    for model, lam_min, lam_max, name, minimizer_i in [
            (RidgeRegression, 0.0001, 20, "ridge", 0),
            (Lasso, 0.0001, 4, "lasso", 1)]:
        # errors[i] holds (train_error, validation_error) for the i-th lambda.
        errors = np.ndarray([n_evaluations, 2], float)
        evaluation_range = np.linspace(lam_min, lam_max, n_evaluations)
        for i, lam in enumerate(evaluation_range):
            errors[i] = cross_validate(model(lam), train_X, train_y,
                                       mean_square_error)
        minimizers[minimizer_i] = evaluation_range[np.argmin(errors[:, 1])]
        fig = go.Figure(
            [go.Scatter(x=evaluation_range, y=errors[:, 0],
                        mode="lines+markers",
                        name="train error"),
             go.Scatter(x=evaluation_range, y=errors[:, 1],
                        mode="lines+markers",
                        name="validation error")
             ], layout=go.Layout(
                title=rf"$\text{{Mean Train and Validation Errors Using 5-fold"
                      rf" Cross Validation on {name} regressor over the "
                      rf"diabetes dataset.}}$"))
        fig.show()
    # Question 8 - Compare best Ridge model, best Lasso model and Least Squares model
    ridge_error = RidgeRegression(minimizers[0]).fit(train_X, train_y).loss(
        test_X, test_y)
    # Lasso is the sklearn estimator (no .loss), so compute its MSE manually.
    lasso_error = mean_square_error(test_y, Lasso(minimizers[1]).fit(train_X,
                                                                     train_y).
                                    predict(test_X))
    ls_error = LinearRegression().fit(train_X, train_y).loss(test_X, test_y)
    print(
        f"ridge using lambda = {minimizers[0]} gave a test error of {ridge_error}")
    print(
        f"lasso using lambda = {minimizers[1]} gave a test error of {lasso_error}")
    print(f"least squares gave a test error of {ls_error}")
if __name__ == '__main__':
    # Fixed seed makes the randomly generated datasets reproducible.
    np.random.seed(0)
    select_polynomial_degree()
    # Noiseless run isolates model selection from noise effects.
    select_polynomial_degree(noise=0)
    select_polynomial_degree(n_samples=1500, noise=10)
    select_regularization_parameter()
|
# Scrape the free HTTPS proxy list from sslproxies.org and print
# "<ip> <port> <country> <type>" for each entry.
# NOTE(review): `httplib` and the `print` statement make this a
# Python 2 script; it will not run under Python 3 (use http.client there).
import httplib
# importing beautiful soup for parsing html tags
from bs4 import BeautifulSoup as BS
# setting up the connecting using get request
conn = httplib.HTTPSConnection("www.sslproxies.org")
conn.request("GET", "/")
# getting the response and storing it in data
response = conn.getresponse()
data = response.read()
# applying beautifulsoup for parsing
soup = BS(data,"html.parser")
# The proxy table is assumed to be the first <tbody> on the page;
# soup.find returns None (and the next line raises) if the layout changes.
table = soup.find('tbody')
rows = table.findAll('tr')
for tr in rows:
    cols = tr.findAll('td')
    # Each row is expected to have exactly 8 cells; the unpack below
    # raises ValueError for any other row shape.
    IP_Address,Port,Code_Country,Country,Type_proxy,Google,https,LastCheck = [c.text for c in cols]
    # displaying string along with needed infos
    print IP_Address+" "+Port+" "+Country+" "+Type_proxy
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Attention cells."""
__all__ = ['PositionalEmbeddingMultiHeadAttentionCell']
import math
import mxnet as mx
from gluonnlp.model.attention_cell import _masked_softmax
class PositionalEmbeddingMultiHeadAttentionCell(mx.gluon.HybridBlock):
    """Multi-head Attention Cell with positional embeddings.
    Parameters
    ----------
    d_head
        Number of projected units for respectively query, key, value and
        positional embeddings per attention head.
    num_heads
        Number of parallel attention heads
    dropout
        Fraction of attention weights to drop; 0 disables dropout.
    scaled
        Whether to divide attention scores by sqrt(d_head).
    weight_initializer : str or `Initializer` or None, default None
        Initializer of the weights.
    bias_initializer : str or `Initializer`, default 'zeros'
        Initializer of the bias.
    """
    def __init__(self, d_head: int, num_heads: int, dropout: float, scaled: bool,
                 weight_initializer=None, bias_initializer='zeros', dtype='float32', prefix=None,
                 params=None):
        super().__init__(prefix=prefix, params=params)
        self._d_head = d_head
        self._num_heads = num_heads
        self._dropout = dropout
        self._scaled = scaled
        self._dtype = dtype
        # One bias-free dense projection per input stream (proj_query,
        # proj_key, proj_value, proj_emb).
        units = ['query', 'key', 'value', 'emb']
        with self.name_scope():
            for name in units:
                setattr(
                    self, 'proj_{}'.format(name),
                    mx.gluon.nn.Dense(units=d_head * num_heads, use_bias=False, flatten=False,
                                      weight_initializer=weight_initializer,
                                      bias_initializer=bias_initializer, prefix='{}_'.format(name)))
            # Learned per-head biases added to the projected query — the
            # content and position biases of Dai et al. (2019); see
            # _rel_shift's docstring for the reference.
            self.query_key_bias = self.params.get('query_key_bias', shape=(num_heads, d_head),
                                                  init=bias_initializer)
            self.query_emb_bias = self.params.get('query_emb_bias', shape=(num_heads, d_head),
                                                  init=bias_initializer)
        if dropout:
            self._dropout_layer = mx.gluon.nn.Dropout(dropout)
    def hybrid_forward(self, F, query, key, value, emb, mask, query_key_bias, query_emb_bias):  # pylint: disable=arguments-differ
        """Compute the attention.
        Parameters
        ----------
        query : Symbol or NDArray
            Query vector. Shape (batch_size, query_length, query_dim)
        key : Symbol or NDArray
            Key of the memory. Shape (batch_size, memory_length, key_dim)
        value : Symbol or NDArray
            Value of the memory. Shape (batch_size, memory_length, value_dim)
        emb : Symbol or NDArray
            Positional embeddings. Shape (memory_length, value_dim)
        mask : Symbol or NDArray
            Mask of the memory slots. Shape (batch_size, query_length, memory_length)
            Only contains 0 or 1 where 0 means that the memory slot will not be used.
            If set to None. No mask will be used.
        Returns
        -------
        context_vec : Symbol or NDArray
            Shape (batch_size, query_length, context_vec_dim)
        att_weights : Symbol or NDArray
            Attention weights of multiple heads.
            Shape (batch_size, num_heads, query_length, memory_length)
        """
        att_weights = self._compute_weight(F, query, key, emb, mask, query_key_bias=query_key_bias,
                                           query_emb_bias=query_emb_bias)
        context_vec = self._read_by_weight(F, att_weights, value)
        return context_vec, att_weights
    def _project(self, F, name, x):
        """Apply proj_<name> and split the result into per-head slices."""
        # Shape (batch_size, query_length, num_heads * d_head)
        x = getattr(self, 'proj_{}'.format(name))(x)
        # Shape (batch_size * num_heads, query_length, d_head)
        x = F.transpose(x.reshape(shape=(0, 0, self._num_heads, -1)),
                        axes=(0, 2, 1, 3))\
            .reshape(shape=(-1, 0, 0), reverse=True)
        return x
    @staticmethod
    def _rel_shift(F, x):
        """Perform relative shift operation following Dai et al. (2019) Appendix B
        Unlike Dai et al.'s PyTorch implementation, the relative shift is
        performed on the last two dimensions of the ndarray x.
        Requires len(x.shape) == 3 due to F.swapaxes not supporting negative
        indices
        """
        # Zero pad along last axis
        zero_pad = F.zeros_like(F.slice_axis(x, axis=-1, begin=0, end=1))
        x_padded = F.concat(zero_pad, x, dim=-1)
        # Reshape to x.shape[:-2] + [x.shape[-1] + 1, x.shape[-2]]
        x_padded = F.reshape_like(x_padded, F.swapaxes(x_padded, 1, 2))
        # Remove padded elements
        x_padded = F.slice_axis(x_padded, axis=-2, begin=1, end=None)
        # Reshape back to original shape
        x = F.reshape_like(x_padded, x)
        return x
    def _compute_weight(self, F, query, key, emb, mask, query_key_bias, query_emb_bias):
        """Compute per-head attention weights from query/key/positional terms."""
        # Project query, key and emb
        proj_query = self.proj_query(query).reshape(shape=(0, 0, self._num_heads, -1))
        proj_key = self.proj_key(key).reshape(shape=(0, 0, self._num_heads, -1))
        proj_emb = self.proj_emb(emb).reshape(shape=(-1, self._num_heads, self._d_head))
        # Add biases and transpose to (batch_size, num_heads, query_length,
        # d_head) or (num_heads, query_length, d_head)
        query_with_key_bias = F.transpose(
            F.broadcast_add(proj_query, F.reshape(query_key_bias, shape=(1, 1, 0, 0),
                                                  reverse=True)), axes=(0, 2, 1, 3))
        query_with_emb_bias = F.transpose(
            F.broadcast_add(proj_query, F.reshape(query_emb_bias, shape=(1, 1, 0, 0),
                                                  reverse=True)), axes=(0, 2, 1, 3))
        proj_key = F.transpose(proj_key, axes=(0, 2, 1, 3))
        proj_emb = F.transpose(proj_emb, axes=(1, 0, 2))
        # Broadcast emb along batch axis
        proj_emb = F.broadcast_like(F.expand_dims(proj_emb, axis=0), proj_key)
        # Merge batch and num_heads axes
        query_with_key_bias = query_with_key_bias.reshape(shape=(-1, 0, 0), reverse=True)
        proj_key = proj_key.reshape(shape=(-1, 0, 0), reverse=True)
        query_with_emb_bias = query_with_emb_bias.reshape(shape=(-1, 0, 0), reverse=True)
        proj_emb = proj_emb.reshape(shape=(-1, 0, 0), reverse=True)
        if mask is not None:
            # Insert and broadcast along num_heads axis. Merge num_heads and
            # batch_size axes: (batch_size * num_heads, query_length,
            # memory_length)
            mask = F.broadcast_axis(F.expand_dims(mask, axis=1), axis=1, size=self._num_heads)\
                .reshape(shape=(-1, 0, 0), reverse=True)
        # Content term (AC) and position term (BD) of the Transformer-XL
        # attention decomposition; BD is relative-shifted before summing.
        att_score_AC = F.batch_dot(query_with_key_bias, proj_key, transpose_b=True)
        att_score_BD = F.batch_dot(query_with_emb_bias, proj_emb, transpose_b=True)
        # Relative shift
        shifted_att_score_BD = self._rel_shift(F, att_score_BD)
        att_score = att_score_AC + shifted_att_score_BD
        if self._scaled:
            att_score = att_score / math.sqrt(self._d_head)
        att_weights = _masked_softmax(F, att_score, mask, self._dtype)
        if self._dropout:
            att_weights = self._dropout_layer(att_weights)
        # Un-merge to (batch_size, num_heads, query_length, memory_length).
        return att_weights.reshape(shape=(-1, self._num_heads, 0, 0), reverse=True)
    def _read_by_weight(self, F, att_weights, value):
        """Aggregate projected values by the attention weights."""
        att_weights = att_weights.reshape(shape=(-1, 0, 0), reverse=True)
        proj_value = self._project(F, 'value', value)
        context_vec = F.batch_dot(att_weights, proj_value)
        # Back to (batch_size, query_length, num_heads * d_head).
        context_vec = F.transpose(
            context_vec.reshape(shape=(-1, self._num_heads, 0, 0), reverse=True),
            axes=(0, 2, 1, 3)).reshape(shape=(0, 0, -1))
        return context_vec
|
"""
Preprocess SUN RGB-D before training a segmentation model.
https://rgbd.cs.princeton.edu/
how to run run:
python -m sun.preprocess preprocess-sun
python -m sun.preprocess preprocess-sun-obj-masks
"""
from collections import Counter
import argh
import scipy.io
import os
from tqdm import tqdm
from PIL import Image
import numpy as np
import json
# For SUN, we use conservative maximum margin of 112 (for a model with window size 224)
from segmentation.utils import add_margins_to_image
from segmentation.constants import SUN_CATEGORIES, sun_convert_categories, SUN_LABEL_2_ID
# Margin (px) added around each image; 112 = half of a 224-px model window.
MARGIN_SIZE = 112
# Required environment variables: raw SUN RGB-D root and output root.
SOURCE_PATH = os.environ['SOURCE_DATA_PATH']
TARGET_PATH = os.environ['DATA_PATH']
ANNOTATIONS_DIR = os.path.join(TARGET_PATH, 'annotations')
MARGIN_IMG_DIR = os.path.join(TARGET_PATH, f'img_with_margin_{MARGIN_SIZE}')
def preprocess_sun():
    """Convert SUN RGB-D into per-split label maps and margin-padded images.

    For every sample in the official train/val/test split:
      1. builds a uint8 label map (seg.mat names mapped through
         sun_convert_categories to SUN_LABEL_2_ID ids, 0 = void/unknown)
         saved as <ANNOTATIONS_DIR>/<split>/<img_id>.npy;
      2. saves the RGB image with a MARGIN_SIZE border as PNG under
         MARGIN_IMG_DIR/<split>/.
    Finally writes the per-split image-id lists to all_images.json.
    """
    # this scripts about 30 minutes using a single CPU
    print(f"Using {len(SUN_CATEGORIES)} object categories")
    os.makedirs(ANNOTATIONS_DIR, exist_ok=True)
    os.makedirs(MARGIN_IMG_DIR, exist_ok=True)
    img_ids = {
        'train': [], 'val': [], 'test': []
    }
    split_mat_path = os.path.join(SOURCE_PATH, 'SUNRGBDtoolbox/traintestSUNRGBD/allsplit.mat')
    split_mat = scipy.io.loadmat(split_mat_path)
    # The .mat stores absolute paths from the original Princeton server.
    paths = {
        'train': split_mat['trainvalsplit'][0][0][0][:, 0],
        'val': split_mat['trainvalsplit'][0][0][1][:, 0],
        'test': split_mat['alltest'][0]
    }
    for split_key in ['train', 'val', 'test']:
        class_counter = Counter()
        split_paths = paths[split_key]
        os.makedirs(os.path.join(MARGIN_IMG_DIR, split_key), exist_ok=True)
        os.makedirs(os.path.join(ANNOTATIONS_DIR, split_key), exist_ok=True)
        for sample_dir in tqdm(split_paths, desc=split_key):
            sample_dir = sample_dir[0]
            # Strip the original server prefix, then re-root at SOURCE_PATH.
            sample_dir = os.path.relpath(sample_dir, '/n/fs/sun3d/data')
            img_id = sample_dir.replace(os.sep, '_').replace('-', '_')
            sample_dir = os.path.join(SOURCE_PATH, sample_dir)
            # 1. annotations
            mat = scipy.io.loadmat(os.path.join(sample_dir, 'seg.mat'))
            # Index 0 is reserved for 'void'; `seglabel` values index `names`.
            names = ['void'] + list(np.concatenate([np.concatenate(n.flatten()) for n in mat['names']]))
            seg_mat = mat['seglabel'].transpose()
            names = [sun_convert_categories.get(n, n) for n in names]
            label_ids = np.zeros_like(seg_mat, dtype=np.uint8)
            for i in np.unique(seg_mat):
                name = names[i]
                # NOTE(review): names were already converted above, so this
                # second lookup is a no-op unless conversions can chain.
                name = sun_convert_categories.get(name, name)
                if name in SUN_LABEL_2_ID:
                    label_ids[seg_mat == i] = SUN_LABEL_2_ID[name]
                    class_counter[name] += 1
            np.save(os.path.join(ANNOTATIONS_DIR, split_key, f'{img_id}.npy'), label_ids)
            # 2. image with added margin
            img_dir = os.path.join(sample_dir, 'image')
            assert len(os.listdir(img_dir)) == 1
            img_path = os.listdir(img_dir)[0]
            full_img_path = os.path.join(img_dir, img_path)
            with open(full_img_path, 'rb') as f:
                img = Image.open(f).convert('RGB')
            img_with_margin = add_margins_to_image(img, MARGIN_SIZE)
            output_img_path = os.path.join(MARGIN_IMG_DIR, split_key, img_id + '.png')
            img_with_margin.save(output_img_path)
            img_ids[split_key].append(img_id)
        print()
        print(f"Object class counts on {split_key} set:")
        for cls, count in class_counter.most_common():
            print('{:5d}: {:s}'.format(count, cls))
        print()
    with open(os.path.join(TARGET_PATH, 'all_images.json'), 'w') as fp:
        json.dump(img_ids, fp)
def preprocess_sun_obj_masks():
    """Export per-image instance (object) masks for the SUN RGB-D dataset.

    Reads the official train/val/test split from the toolbox's
    ``allsplit.mat``, then for every sample saves its raw ``seglabel``
    matrix (transposed) as ``<img_id>_obj_mask.npy`` and prints the mean
    object count per split.
    """
    print(f"Using {len(SUN_CATEGORIES)} object categories")
    os.makedirs(ANNOTATIONS_DIR, exist_ok=True)
    os.makedirs(MARGIN_IMG_DIR, exist_ok=True)
    split_mat_path = os.path.join(SOURCE_PATH, 'SUNRGBDtoolbox/traintestSUNRGBD/allsplit.mat')
    split_mat = scipy.io.loadmat(split_mat_path)
    # Nested indexing mirrors the MATLAB struct layout inside allsplit.mat.
    paths = {
        'train': split_mat['trainvalsplit'][0][0][0][:, 0],
        'val': split_mat['trainvalsplit'][0][0][1][:, 0],
        'test': split_mat['alltest'][0]
    }
    for split_key in ['train', 'val', 'test']:
        objects_per_image = []
        split_paths = paths[split_key]
        os.makedirs(os.path.join(MARGIN_IMG_DIR, split_key), exist_ok=True)
        os.makedirs(os.path.join(ANNOTATIONS_DIR, split_key), exist_ok=True)
        for sample_dir in tqdm(split_paths, desc=split_key):
            sample_dir = sample_dir[0]
            # Paths stored in the .mat are absolute on the dataset authors'
            # cluster; rebase them onto the local SOURCE_PATH.
            sample_dir = os.path.relpath(sample_dir, '/n/fs/sun3d/data')
            img_id = sample_dir.replace(os.sep, '_').replace('-', '_')
            sample_dir = os.path.join(SOURCE_PATH, sample_dir)
            mat = scipy.io.loadmat(os.path.join(sample_dir, 'seg.mat'))
            # Transposed to match the orientation used elsewhere in this
            # file (see the image preprocessing above) — presumably
            # MATLAB column-major vs. NumPy row-major; TODO confirm.
            seg_mat = mat['seglabel'].transpose()
            # Each distinct label value in seglabel is one object instance.
            objects_per_image.append(len(np.unique(seg_mat)))
            np.save(os.path.join(ANNOTATIONS_DIR, split_key, f'{img_id}_obj_mask.npy'), seg_mat)
        print("{:s} set. Average objects per image: {:.2f}".format(split_key, np.mean(objects_per_image)))
if __name__ == '__main__':
    # Expose both preprocessing entry points as CLI subcommands via argh.
    argh.dispatch_commands([preprocess_sun, preprocess_sun_obj_masks])
|
from spike import simulator
import machine,time
class ForceSensor:
    """Simulator-backed stand-in for the LEGO SPIKE force sensor.

    Reads a potentiometer through the board ADC and maps the raw reading
    (0..4095) to newtons, a percentage, or a pressed/released state.
    """
    # If ISDEBUG is true, all modules send debug information through console
    ISDEBUG = True
    # The PIN for Force Sensor (Potentiometer) ADC0 — one entry per port
    FORCE_PIN_PORTA = 36
    FORCE_PIN_PORTB = 36
    FORCE_PIN_PORTC = 36
    FORCE_PIN_PORTD = 36
    FORCE_PIN_PORTE = 36
    FORCE_PIN_PORTF = 36
    # If the measured value is higher than SIMULATORSWITCHMAX then the while
    # cycle is over or a press event occurs. (Inverse setting is possible)
    SIMULATORSWITCHMAX = 80
    # If the measured value is lower than SIMULATORSWITCHMIN then the while
    # cycle is over or a press event occurs. (Inverse setting is possible)
    SIMULATORSWITCHMIN = 0

    def __init__(self, port):
        """Configure the ADC for *port* ('A'..'F') and create the simulator."""
        self.port = port
        # Map the port letter to its ADC pin; unknown ports fall back to
        # port A's pin, matching the original if-chain's default.
        pin_by_port = {
            'A': self.FORCE_PIN_PORTA,
            'B': self.FORCE_PIN_PORTB,
            'C': self.FORCE_PIN_PORTC,
            'D': self.FORCE_PIN_PORTD,
            'E': self.FORCE_PIN_PORTE,
            'F': self.FORCE_PIN_PORTF,
        }
        forceLEDpin = pin_by_port.get(port, self.FORCE_PIN_PORTA)
        self.adc = machine.ADC(machine.Pin(forceLEDpin))
        # (fixed: removed a bare `self.FORCE_PIN_PORTA` expression statement
        # that had no effect)
        self.adc.atten(machine.ADC.ATTN_11DB)  # Full range: 3.3v
        self.newton = 0
        self.percentage = 0
        self.simulator = simulator.Simulator()
        if(self.ISDEBUG):print("ForceSensor->__init__(port=",port,"). Force sensor is initialised in debug mode. Force sensor PIN:",forceLEDpin,", Switching threshold: ",self.SIMULATORSWITCHMAX," Change at spike.force_sensor.py ")

    def _read_percentage(self):
        # Scale the current raw ADC reading (0..4095) into 0..100 and cache it.
        self.percentage = self.simulator.get_new_value(
            isdebug=self.ISDEBUG,
            newreading=self.adc.read(),
            minvalue=0,
            maxvalue=100,
            minreading=0,
            maxreading=4095)
        return self.percentage

    def get_force_newton(self):
        """Retrieve the measured force, in newtons (scaled 0..10)."""
        if(self.ISDEBUG):print("ForceSensor->get_force_newton(). Retrieves the measured force, in newtons.")
        self.newton = self.simulator.get_new_value(
            isdebug=self.ISDEBUG,
            newreading=self.adc.read(),
            minvalue=0,
            maxvalue=10,
            minreading=0,
            maxreading=4095)
        return self.newton

    def get_force_percentage(self):
        """Retrieve the measured force, in percentages (0..100)."""
        if(self.ISDEBUG):print("ForceSensor->get_force_percentage(). Retrieves the measured force, in percentages.")
        return self._read_percentage()

    def is_pressed(self):
        """Return True when the sensed percentage is below SIMULATORSWITCHMAX."""
        if(self.ISDEBUG):print("ForceSensor->is_pressed(). Tests whether the button on the force sensor is pressed.")
        if self._read_percentage() < self.SIMULATORSWITCHMAX:
            if self.ISDEBUG:
                print("force sensor button is pressed")
            return True
        else:
            return False

    def wait_until_pressed(self):
        """Poll every 500 ms until the reading leaves the
        [SIMULATORSWITCHMIN, SIMULATORSWITCHMAX] band."""
        if(self.ISDEBUG):print("ForceSensor->wait_until_pressed(). Waits until the Force Sensor is pressed.")
        while True:
            self._read_percentage()
            if(self.ISDEBUG):print("Do changes at force sensor new reading:", str(self.percentage), " to exit it should be lower then:", self.SIMULATORSWITCHMIN , " or it should be higher then:", self.SIMULATORSWITCHMAX )
            if self.percentage < self.SIMULATORSWITCHMIN or self.percentage > self.SIMULATORSWITCHMAX:
                break
            time.sleep_ms(500)

    def wait_until_released(self):
        """Poll every 500 ms until the reading falls inside the
        [SIMULATORSWITCHMIN, SIMULATORSWITCHMAX] band."""
        if(self.ISDEBUG):print("ForceSensor->wait_until_released(). Waits until the Force Sensor is released.")
        while True:
            self._read_percentage()
            if(self.ISDEBUG):print("Do changes at force sensor new reading:", str(self.percentage), " to exit it should be higher or equal then:", self.SIMULATORSWITCHMIN , " or it should be lower or equal then:", self.SIMULATORSWITCHMAX )
            if self.SIMULATORSWITCHMIN <= self.percentage <= self.SIMULATORSWITCHMAX:
                break
            time.sleep_ms(500)
from flask import Flask
# Create the WSGI application object.
app = Flask(__name__)
# Register "/" to the view function below.
@app.route("/")
# (a comment between decorator and def is legal Python)
def hello():
    """Return a static greeting for the site root."""
    return "Hello beautiful world!"
|
# !/usr/bin/env python3
# encoding: utf-8
"""
@version: 0.1
@author: feikon
@license: Apache Licence
@contact: crossfirestarer@gmail.com
@site: https://github.com/feikon
@software: PyCharm
@file: problen_0007.py
@time: 2017/6/15 9:19
"""
# Problem describe: count code lines, include comment and blank lines
# Problem solve step:
# 1.Open and read the file;
# 2.count all the lines, count begin with #(comment),count blank lines;
import logging
import os
from os.path import join
import re
def lines_count(file):
    """Count total, comment ('#'-prefixed) and blank lines in *file*.

    Prints a summary and returns ``(total, comment, blank)``; returns
    ``None`` (after logging the error) when the file cannot be opened.
    """
    count_lines = 0            # was initialised to 1, over-counting by one
    count_comment_lines = 0
    count_blank_lines = 0
    try:
        with open(file) as f:
            for line in f:
                count_lines += 1
                # A comment line starts with '#' in column 0.
                if re.match(r'^#', line):
                    count_comment_lines += 1
                # Lines from file iteration are never empty, so [0] is safe.
                if line[0] == '\n':
                    count_blank_lines += 1
    except IOError as e:
        # The original called logging.log(level=IOError, ...): the level
        # must be an int, not an exception class. It also fell through and
        # printed misleading zero counts for the unreadable file.
        logging.error(e)
        return None
    print('{} file lines are {},comment lines are {},blank lines are {}'
          .format(file, count_lines, count_comment_lines, count_blank_lines))
    return count_lines, count_comment_lines, count_blank_lines
def walk_path(path):
    """Recursively collect the paths of all ``.py`` files under *path*.

    Returns a list of full file paths.
    """
    file_list = list()
    for root, dirs, files in os.walk(path):
        for name in files:
            filename = join(root, name)
            # splitext is robust where the original split('.')[1] raised
            # IndexError for extension-less names and misclassified names
            # (or parent directories) containing more than one dot.
            if os.path.splitext(name)[1] == '.py':
                file_list.append(filename)
    return file_list
if __name__ == '__main__':
    # NOTE(review): hard-coded scan root — adjust for the local machine.
    code_files = walk_path('D:/show_me/code_files')
    for code_file in code_files:
        lines_count(code_file)
|
# expose the code in the file qcSTR/qcSTR.py
# through the statement import qcSTR
# instead of through import qcSTR.qcSTR
from qcSTR.qcSTR import *
|
#!python
#-*- coding:utf-8 -*-
import os,sys,base64,hashlib,time
from Crypto import Random
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
def KeysNew(bit = 2048):
    """Generate a *bit*-bit RSA key pair and write both halves to
    ssl/secret.pem and ssl/public.pem."""
    entropy = Random.new().read
    keypair = RSA.generate(bit, entropy)
    if not os.path.exists('ssl'):
        os.mkdir('ssl')
    with open('ssl/secret.pem','wb') as file:
        file.write(keypair.exportKey())
    with open('ssl/public.pem','wb') as file:
        file.write(keypair.publickey().exportKey())
    print('finish export pem files')
def encrypt(strtxt):
    """RSA-encrypt *strtxt* (UTF-8) with the public key, base64-encoded."""
    with open('ssl/public.pem','r') as file:
        pubkey = RSA.importKey(file.read())
    ciphertext = PKCS1_v1_5.new(pubkey).encrypt(strtxt.encode(encoding = 'utf-8'))
    return base64.b64encode(ciphertext)
def decrypt(strtxt):
    """Base64-decode *strtxt* and RSA-decrypt it with the secret key.

    Returns the sentinel 'ERROR' when decryption fails.
    """
    with open('ssl/secret.pem','r') as file:
        seckey = RSA.importKey(file.read())
    return PKCS1_v1_5.new(seckey).decrypt(base64.b64decode(strtxt), 'ERROR')
def calmd5(strtxt):
    """Return the hex MD5 digest of *strtxt*.

    hashlib's update() requires bytes; text input is encoded as UTF-8
    first (the original passed str straight through, which raises
    TypeError on Python 3).
    """
    if isinstance(strtxt, str):
        strtxt = strtxt.encode('utf-8')
    md5gen = hashlib.md5()
    md5gen.update(strtxt)
    return(md5gen.hexdigest())
def test(root = '.'):
    """Fingerprint the file tree at *root* and round-trip the combined
    digest through encrypt()/decrypt() as a smoke test."""
    def recursion(mpath):
        # One "relative-path-size" entry per regular file, recursing into dirs.
        if os.path.isfile(mpath):
            return ['{0}-{1}'.format(os.path.relpath(mpath,root),os.path.getsize(mpath))]
        elif os.path.isdir(mpath):
            tmp = []
            for x in os.listdir(mpath):
                fpath = os.path.join(mpath,x)
                tmp.extend(recursion(fpath))
            return tmp
        # Anything else (broken symlinks, sockets, ...) contributes nothing.
        # The original fell through returning None, crashing tmp.extend().
        return []
    filelist = recursion(root)
    # print() calls keep this module importable on Python 3; the original
    # used Python-2-only `print u'...'` statements.
    print(u'\n总共{0}个文件'.format(len(filelist)))
    md5list = [calmd5(x) for x in filelist]
    md5list.sort()
    strtxt = ','.join(md5list)
    md5str = calmd5(strtxt)
    enctxt = encrypt(md5str)
    print(u'\n原始字符串: {0}'.format(md5str))
    print(u'\n加密字符串: {0}'.format(enctxt))
    print(u'\n解密字符串: {0}'.format(decrypt(enctxt)))
if __name__ == '__main__':
    start = time.time()
    # Generate the RSA key pair on first run only.
    if not os.path.exists('ssl'):
        KeysNew()
    test()
    # Report elapsed wall-clock time.
    print(u'\n用时{0}s'.format(time.time()-start))
|
import logging
import pytest
import peewee
from playhouse.sqlite_ext import SqliteExtDatabase
# Shared in-memory SQLite database used by every test in this module.
test_db = SqliteExtDatabase(':memory:')
# Echo peewee's SQL statements for easier debugging of failing tests.
peewee_logger = logging.getLogger('peewee')
peewee_logger.addHandler(logging.StreamHandler())
peewee_logger.setLevel(logging.INFO)
import censere.models.triggers as TRIGGERS
import censere.models.functions as FUNC
# Register the project's custom SQL functions on the test database.
FUNC.register_all( test_db )
@pytest.fixture(scope="module")
def database():
    """Module-scoped fixture handing out the shared in-memory test DB."""
    # Reading a module global needs no `global` declaration.
    return test_db
|
from . models import Quote
import urllib.request,json
from .email import mail_message
base_url=None
def configure_request(app):
    """Capture the quote API endpoint from the app config at startup."""
    global base_url
    base_url=app.config['QUOTE_BASE_URL']
# base_url='http://quotes.stormconsultancy.co.uk/random.json'
def get_quote():
    """Fetch a random quote from the configured API endpoint.

    Returns a Quote instance, or None when the API sends an empty payload.
    """
    print(base_url)
    with urllib.request.urlopen(base_url) as response:
        payload = json.loads(response.read())
        if payload:
            return map_quote_result(payload)
        return None
def map_quote_result(quote_obj):
    """Convert a raw quote dict from the API into a Quote model instance."""
    return Quote(
        quote_obj.get('id'),
        quote_obj.get('author'),
        quote_obj.get('quote'),
        quote_obj.get('permalink'),
    )
def subscriber_alert(subscriber_list,post):
    """Email every subscriber about *post*; no-op for an empty list."""
    if not subscriber_list:
        return
    for subscriber in subscriber_list:
        mail_message("New post in Fluent Exchange","email/new_post",subscriber.email,post=post,subscriber=subscriber)
|
class Backend(object):
    """The backend is responsible for finding and creating DS4 devices."""
    # Module name used when requesting a logger from the manager.
    __name__ = "backend"
    def __init__(self, manager):
        # Obtain a logger scoped to this backend.
        self.logger = manager.new_module(self.__name__)
    def setup(self):
        """Initialize the backend and make it ready for scanning.
        Raises BackendError on failure.
        """
        raise NotImplementedError
    @property
    def devices(self):
        """This iterator yields any devices found."""
        raise NotImplementedError
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from . import config
from . import helpers
from . import exceptions
# Module API
def validate(source, scheme=None, format=None):
    """https://github.com/frictionlessdata/tabulator-py#validate
    """
    # Fall back to autodetection for whichever of scheme/format was omitted
    detected_scheme, detected_format = helpers.detect_scheme_and_format(source)
    scheme = scheme or detected_scheme
    format = format or detected_format
    # A None scheme is legal (inline sources); the format must be known
    if scheme is not None and scheme not in config.LOADERS:
        raise exceptions.SchemeError('Scheme "%s" is not supported' % scheme)
    if format not in config.PARSERS:
        raise exceptions.FormatError('Format "%s" is not supported' % format)
    return True
|
from output.models.nist_data.list_pkg.g_month_day.schema_instance.nistschema_sv_iv_list_g_month_day_length_2_xsd.nistschema_sv_iv_list_g_month_day_length_2 import NistschemaSvIvListGMonthDayLength2
__all__ = [
"NistschemaSvIvListGMonthDayLength2",
]
|
# ------------------------------- /usr/bin/g++-7 ------------------------------#
# ------------------------------- coding: utf-8 -------------------------------#
# Criado por: Jean Marcelo Mira Junior
# Lucas Daniel dos Santos
# Versão: 1.0
# Criado em: 13/04/2021
# Sistema operacional: Linux - Ubuntu 20.04.1 LTS
# Python 3
# ------------------------------ Pacotes --------------------------------------#
import numpy as np
import matplotlib.pyplot as plt
from openpyxl import Workbook
# -----------------------------------------------------------------------------#
def edoEuler(f, r0, s0, nPassos, h):
    """Integrate dr/ds = f(s, r) with the explicit Euler method.

    Args:
        f: right-hand-side function f(s, r).
        r0: initial state vector.
        s0: initial value of the independent variable.
        nPassos: number of time steps.
        h: step size.

    Returns:
        (s, r): samples of the independent variable and the state history.
    """
    num_eqs = len(r0)
    r = np.zeros([nPassos, num_eqs], dtype=np.float32)
    s = np.zeros(nPassos, dtype=np.float32)
    r[0], s[0] = r0, s0
    for step in range(nPassos - 1):
        slope = f(s[step], r[step])
        r[step + 1] = r[step] + slope * h
        s[step + 1] = s[step] + h
    return s, r
def edoHeun(f, r0, s0, nPassos, h):
    """Integrate dr/ds = f(s, r) with Heun's (improved Euler) method.

    Args:
        f: right-hand-side function f(s, r).
        r0: initial state vector.
        s0: initial value of the independent variable.
        nPassos: number of time steps.
        h: step size.

    Returns:
        (s, r): samples of the independent variable and the state history.
    """
    num_eqs = len(r0)
    r = np.zeros([nPassos, num_eqs], dtype=np.float32)
    s = np.zeros(nPassos, dtype=np.float32)
    r[0], s[0] = r0, s0
    for step in range(nPassos - 1):
        s[step + 1] = s[step] + h
        k_start = f(s[step], r[step])
        k_end = f(s[step + 1], r[step] + h * k_start)
        # Average the slopes at both ends of the interval.
        r[step + 1] = r[step] + 0.5 * (k_start + k_end) * h
    return s, r
def edoRungeKutta(f, r0, s0, nPassos, h):
    """Integrate dr/ds = f(s, r) with the classic 4th-order Runge-Kutta.

    Args:
        f: right-hand-side function f(s, r).
        r0: initial state vector.
        s0: initial value of the independent variable.
        nPassos: number of time steps.
        h: step size.

    Returns:
        (s, r): samples of the independent variable and the state history.
    """
    num_eqs = len(r0)
    r = np.zeros([nPassos, num_eqs], dtype=np.float32)
    s = np.zeros(nPassos, dtype=np.float32)
    r[0], s[0] = r0, s0
    half = 0.5 * h
    for step in range(nPassos - 1):
        s[step + 1] = s[step] + h
        k1 = f(s[step], r[step])
        k2 = f(s[step] + half, r[step] + half * k1)
        k3 = f(s[step] + half, r[step] + half * k2)
        k4 = f(s[step + 1], r[step] + h * k3)
        # Weighted average of the four stage slopes.
        r[step + 1] = r[step] + h * (k1 + 2 * k2 + 2 * k3 + k4) * (1 / 6)
    return s, r
def edoRungeKuttaFehlberg(f, r0, s0, nPassos, h, tol, k, alpha):
    """Runge-Kutta-Fehlberg method: Taylor-series based integration with
    an adaptive step size h.

    Receives:
        f       => function to be solved
        r0      => initial state vector
        s0      => initial value of the independent variable
        nPassos => number of time steps
        h       => (variable) step size
        tol     => error tolerance
        k       => order of the method
        alpha   => safety factor for the step-size update

    Returns:
        s => independent variable samples
        r => dependent state history
    """
    nEquacoes = len(r0)
    r = np.zeros([nPassos, nEquacoes], dtype=np.float32)
    s = np.zeros(nPassos, dtype=np.float32)
    q = np.zeros(nPassos, dtype=np.float32)
    y4 = np.zeros(nPassos, dtype=np.float32)
    y5 = np.zeros(nPassos, dtype=np.float32)
    r[0] = r0
    s[0] = s0
    qMin = 0
    for n in range(0, nPassos - 1):
        # Retry the step with a smaller h until the tolerance is met.
        while (True):
            # Fehlberg's six stage slopes.
            k1 = f(s[n], r[n])
            k2 = f(s[n] + h * (1 / 4), r[n] + h * k1 * (1 / 4))
            k3 = f(s[n] + h * (3 / 8), r[n] + h * ((k1 * 3 + k2 * 9) / 32))
            k4 = f(s[n] + h * (12 / 13), r[n] + h *
                   ((k1 * 1932 - k2 * 7200 + k3 * 7296) / 2197))
            k5 = f(s[n] + h, r[n] + h * (k1 * (439 / 216) - k2 *
                   8 + k3 * (3680 / 513) - k4 * (845 / 4104)))
            k6 = f(s[n] + h * (1 / 2), r[n] + h * (- k1 * (8 / 27) + k2 *
                   2 - k3 * (3544 / 2565) + k4 * (1859 / 4104) - k5 * (11 / 40)))
            # 4th- and 5th-order estimates; their gap drives the step control.
            y4 = (r[n] + h * (k1 * (25 / 216) + k3 *
                  (1408 / 2565) + k4 * (2197 / 4104) - k5 * (1 / 5)))
            y5 = (r[n] + h * (k1 * (16 / 135) + k3 * (6656 / 12825) +
                  k4 * (28561 / 56430) - k5 * (9 / 50) + k6 * (2 / 55)))
            # NOTE(review): if y4 == y5 exactly (e.g. an exactly-resolved
            # RHS), abs(y4 - y5) is 0 and q becomes inf, which later scales
            # h to inf — confirm inputs cannot hit this, or guard the
            # denominator.
            q = alpha * ((tol * h) / abs(y4 - y5)) ** (1 / k)
            qMin = min(q)
            # Accept only when every component met the tolerance (qMin >= 1).
            if(qMin >= 1):
                break
            else:
                h = qMin * h
        s[n + 1] = s[n] + h
        r[n + 1] = y5
        # Grow (or shrink) the step for the next iteration.
        h = qMin * h
    return(s, r)
def grafico(se, re, sh, rh, sr, rr, sf, rf):
    """Plot fi, r and z against s for the four integrators, plus the drop
    profile (z x r), side by side in a single 1x4 figure.

    Receives the (s, r) result pair of each method: Euler (se, re),
    Heun (sh, rh), Runge-Kutta (sr, rr) and Runge-Kutta-Fehlberg (sf, rf).
    """
    # One entry per method: (s samples, transposed components, color, label).
    series = [
        (se, re.T, 'red', 'Euler'),
        (sh, rh.T, 'green', 'Heun'),
        (sr, rr.T, 'blue', 'Runge-Kutta'),
        (sf, rf.T, 'yellow', 'Runge-Kutta-Fehlberg'),
    ]
    fig, (fi, r, z, gota) = plt.subplots(
        nrows=1, ncols=4, sharex=False, sharey=False, figsize=(16, 10))
    # The first three panels show one state component each against s.
    panels = (
        (fi, 'Gráfico de fi x s', "s", "fi", 0),
        (r, 'Gráfico de r x s', "s", "r", 1),
        (z, 'Gráfico de z x s', "s", "z", 2),
    )
    for axis, title, xlab, ylab, comp_idx in panels:
        axis.set_title(title)
        axis.set_xlabel(xlab)
        axis.set_ylabel(ylab)
        for s_vals, comps, cor, nome in series:
            axis.plot(s_vals, comps[comp_idx], 'g.', color=cor, label=nome)
    # Fourth panel: drop profile z x r with equal aspect ratio.
    gota.set_title("Gráfico da Gota - z x r")
    gota.set_aspect(1)
    gota.set_xlabel("r")
    gota.set_ylabel("z")
    for s_vals, comps, cor, nome in series:
        gota.plot(comps[1], comps[2], 'g.', color=cor, label=nome)
    gota.legend(loc='upper right', bbox_to_anchor=(1, -0.3))
    plt.subplot_tool()
    fig.suptitle('Resultados')
    plt.show()
def gota(re, rh, rr, rf):
    """Plot the drop profile (z x r) for the four integrators.

    Receives the state history of each method: Euler (re), Heun (rh),
    Runge-Kutta (rr) and Runge-Kutta-Fehlberg (rf).
    """
    axis = plt.subplot(1, 1, 1)
    axis.set_title("Gráfico da Gota - z x r")
    axis.set_aspect(1)
    for dados, cor, nome in (
            (re, 'red', 'Euler'),
            (rh, 'green', 'Heun'),
            (rr, 'blue', 'Runge-Kutta'),
            (rf, 'yellow', 'Runge-Kutta-Fehlberg')):
        # Each history transposes into (fi, r, z) component rows.
        _, r_vals, z_vals = dados.T
        axis.plot(r_vals, z_vals, 'g--', color=cor, label=nome)
    axis.legend(loc='upper right', bbox_to_anchor=(-0.3, 1.0))
    plt.show()
def planilha(nPassos, se, re, sh, rh, sr, rr, sf, rf):
    """Dump the four integrators' results into Dados.xlsx.

    Each method occupies four adjacent columns (s, fi, r, z) with a
    one-letter suffix in the header row: E (Euler), H (Heun),
    R (Runge-Kutta), F (Runge-Kutta-Fehlberg).
    """
    blocks = [
        ('E', se, re.T),
        ('H', sh, rh.T),
        ('R', sr, rr.T),
        ('F', sf, rf.T),
    ]
    arquivo_excel = Workbook()
    planilha1 = arquivo_excel.active
    planilha1.title = "Dados"
    col = 1
    for suffix, s_vals, (fi_vals, r_vals, z_vals) in blocks:
        # Header row: sE, fiE, rE, zE / sH, ... etc.
        for offset, label in enumerate(('s', 'fi', 'r', 'z')):
            planilha1.cell(row=1, column=col + offset, value=label + suffix)
        # Data rows start at row 2; row i holds sample i - 2.
        for row in range(2, nPassos + 2):
            planilha1.cell(row=row, column=col, value=float(s_vals[row - 2]))
            planilha1.cell(row=row, column=col + 1, value=float(fi_vals[row - 2]))
            planilha1.cell(row=row, column=col + 2, value=float(r_vals[row - 2]))
            planilha1.cell(row=row, column=col + 3, value=float(z_vals[row - 2]))
        col += 4
    arquivo_excel.save("Dados.xlsx")
|
"""
# Albatross DevNet scripts
## Usage
1. Run `devnet_create.py NUM_VALIDATORS`. This will create keys and configurations for multiple validator nodes.
2. Copy genesis config from `/tmp/nimiq-devnet-RANDOM/dev-albatross.toml` to `core-rs/genesis/src/genesis/dev-albatross.toml`.
3. Build core-rs: `cargo build`
4. Run seed node. Run a node (not as validator) at `127.0.0.1:8443`
5. Run `devnet_run.py PATH` (with `PATH=/tmp/nimiq-devnet-RANDOM`). This will start the validators.
## Notes
- The path to the `core-rs/target/debug` source code must be set in `devnet_create.py` and `devnet_run.py` in the `TARGET` variable.
- Log files of the validators are in `/tmp/nimiq-devnet-RANDOM/validatorNUM/nimiq-client.log`
"""
from binascii import unhexlify
from pathlib import Path
import sh
import json
from sys import argv
# --- Positional CLI arguments: NUM_VALIDATORS SEED_URI [OUTPUT] ---
try:
    num_validators = int(argv[1])
    seed_uri = argv[2]
except (IndexError, ValueError):
    print("Usage: {} NUM_VALIDATORS SEED_URI [OUTPUT]".format(argv[0]))
    exit(1)
try:
    output = Path(argv[3])
except IndexError:
    output = Path("/tmp/nimiq-devnet")
# Helper binaries from the core-rs debug build (run from the repo root).
target = Path.cwd() / "target" / "debug"
nimiq_address = sh.Command(str(target / "nimiq-address"))
nimiq_bls = sh.Command(str(target / "nimiq-bls"))
def create_bls_keypair():
    """Run nimiq-bls and parse its output into hex public/private keys."""
    keys = []
    for raw in nimiq_bls():
        stripped = raw.strip()
        # Keep only non-empty, non-comment lines: key material.
        if stripped and not stripped.startswith("#"):
            keys.append(stripped)
    return {
        "public_key": keys[0],
        "private_key": keys[1]
    }
def create_nimiq_address():
    """Run nimiq-address and parse its "label: value" output lines."""
    values = [line.split(":")[1].strip() for line in nimiq_address()]
    return {
        "address": values[0],
        "address_raw": values[1],
        "public_key": values[2],
        "private_key": values[3]
    }
def create_validator(path, i):
    """Create keys, addresses and a client.toml for validator *i* under *path*.

    Returns a dict with the validator's BLS keypair, staker/reward
    addresses and its directory path.
    """
    path.mkdir(parents=True, exist_ok=True)
    # create BLS keypair; the raw private key bytes go to validator_key.dat
    validator_key = create_bls_keypair()
    with (path / "validator_key.dat").open("wb") as f:
        f.write(unhexlify(validator_key["private_key"]))
    # create staking (and reward) address
    staker_address = create_nimiq_address()
    reward_address = create_nimiq_address()
    # write config: each validator gets its own loopback host 127.0.1.(i+1)
    # and port 8500+i so several nodes can run on one machine
    with (path / "client.toml").open("wt") as f:
        f.write("""
peer-key-file = "{path}/peer_key.dat"
[network]
host = "{hostname}"
port = {port}
seed_nodes = [
{{ uri = "{seed_uri}" }}
]
[consensus]
network = "dev-albatross"
[log]
level = "trace"
file = "{path}/nimiq-client.log"
timestamps = true
[database]
path = "{path}/"
[validator]
type = "validator"
block_delay = 250
key_file = "{path}/validator_key.dat"
""".format(
            seed_uri=seed_uri,
            hostname="127.0.1.{}".format(i + 1),
            port=str(8500 + i),
            path=str(path)
        ))
    return {
        "validator_key": validator_key,
        "staker_address": staker_address,
        "reward_address": reward_address,
        "path": str(path)
    }
print("Writing devnet to: {}".format(output))
print("Creating validators...")
# Generate per-validator keys, addresses and client.toml files.
validators = []
for i in range(num_validators):
    validator = create_validator(output / "validator{:d}".format(i), i)
    validators.append(validator)
    print("Created validator: {}..".format(validator["validator_key"]["public_key"][0:16]))
print("Writing genesis config")
# Genesis file: fixed signing key and timestamp keep repeated runs
# reproducible, followed by one [[stakes]] entry per validator.
with (output / "dev-albatross.toml").open("wt") as f:
    f.write("""
name = "dev-albatross"
seed_message = "Albatross DevNet"
signing_key = "230cf5070e9362108e3549360b84be23826c23839124b917629fb525db3baece"
timestamp="{timestamp}"
""".format(
        #timestamp=datetime.utcnow().isoformat()
        timestamp="2019-05-10T23:56:52.776772644+00:00"
    ))
    for validator in validators:
        f.write("""
[[stakes]]
staker_address = "{staker_address}"
reward_address = "{reward_address}"
validator_key = "{validator_key}"
balance = 100000000
""".format(
            staker_address=validator["staker_address"]["address"],
            reward_address=validator["reward_address"]["address"],
            validator_key=validator["validator_key"]["public_key"]
        ))
print("Writing configuration")
# Dump the full validator descriptions for devnet_run.py to consume.
with (output / "validators.json").open("wt") as f:
    json.dump(validators, f)
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def levelOrder(self, root: TreeNode) -> [[int]]:
queue = [root]
res = []
if not root:
return []
while queue:
templist = []
templen = len(queue)
for i in range(templen):
temp = queue.pop(0)
templist.append(temp.val)
if temp.left:
queue.append(temp.left)
if temp.right:
queue.append(temp.right)
res.append(templist)
return res
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-07-15 21:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import simple_history.models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11.15): adds DoubletInformation plus its
    simple_history mirror HistoricalDoubletInformation. Avoid hand-editing
    the schema operations below."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('dlp', '0002_auto_20190712_1140'),
        ('core', '0002_auto_20190712_1140'),
    ]
    operations = [
        migrations.CreateModel(
            name='DoubletInformation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('live_single', models.IntegerField(blank=True, default=0, null=True, verbose_name='Number of live single cells')),
                ('dead_single', models.IntegerField(blank=True, default=0, null=True, verbose_name='Number of dead single cells')),
                ('other_single', models.IntegerField(blank=True, default=0, null=True, verbose_name='Number of other single cells')),
                ('live_doublet', models.IntegerField(blank=True, default=0, null=True, verbose_name='Number of live doublet cells')),
                ('dead_doublet', models.IntegerField(blank=True, default=0, null=True, verbose_name='Number of dead doublet cells')),
                ('other_doublet', models.IntegerField(blank=True, default=0, null=True, verbose_name='Number of mixed doublet cells')),
                ('live_gt_doublet', models.IntegerField(blank=True, default=0, null=True, verbose_name='More than two live cells')),
                ('dead_gt_doublet', models.IntegerField(blank=True, default=0, null=True, verbose_name='More than two dead cells')),
                ('other_gt_doublet', models.IntegerField(blank=True, default=0, null=True, verbose_name='More than two other cells')),
                ('library', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='dlp.DlpLibrary', verbose_name='Library')),
            ],
        ),
        # History table mirrors the model plus simple_history bookkeeping.
        migrations.CreateModel(
            name='HistoricalDoubletInformation',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('live_single', models.IntegerField(blank=True, default=0, null=True, verbose_name='Number of live single cells')),
                ('dead_single', models.IntegerField(blank=True, default=0, null=True, verbose_name='Number of dead single cells')),
                ('other_single', models.IntegerField(blank=True, default=0, null=True, verbose_name='Number of other single cells')),
                ('live_doublet', models.IntegerField(blank=True, default=0, null=True, verbose_name='Number of live doublet cells')),
                ('dead_doublet', models.IntegerField(blank=True, default=0, null=True, verbose_name='Number of dead doublet cells')),
                ('other_doublet', models.IntegerField(blank=True, default=0, null=True, verbose_name='Number of mixed doublet cells')),
                ('live_gt_doublet', models.IntegerField(blank=True, default=0, null=True, verbose_name='More than two live cells')),
                ('dead_gt_doublet', models.IntegerField(blank=True, default=0, null=True, verbose_name='More than two dead cells')),
                ('other_gt_doublet', models.IntegerField(blank=True, default=0, null=True, verbose_name='More than two other cells')),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('library', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='dlp.DlpLibrary', verbose_name='Library')),
            ],
            options={
                'verbose_name': 'historical doublet information',
                'db_table': 'doublet_information_history',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
            bases=(simple_history.models.HistoricalChanges, models.Model),
        ),
    ]
|
# Copyright 2014 Knowledge Economy Developments Ltd
#
# Henry Gomersall
# heng@kedevelopments.co.uk
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from .test_pyfftw_base import run_test_suites
import unittest
import pyfftw
import platform
import os
def get_cpus_info():
    """Parse /proc/cpuinfo into a list with one dict per logical CPU.

    Returns None on non-Linux platforms.
    """
    if 'Linux' not in platform.system():
        return None
    # A simple /proc/cpuinfo parser
    cpus_info = []
    idx = 0
    with open(os.path.join('/', 'proc', 'cpuinfo'), 'r') as f:
        for line in f:
            if ':' not in line:
                # Blank separator line: following keys belong to the next CPU.
                idx += 1
                continue
            # split(':', 1): some values themselves contain ':' which made
            # the original two-element unpacking raise ValueError.
            key, value = [each.strip() for each in line.split(':', 1)]
            if idx < len(cpus_info):
                cpus_info[idx][key] = value
            else:
                cpus_info.append({key: value})
    return cpus_info
class UtilsTest(unittest.TestCase):
    """Checks pyfftw.simd_alignment against the CPU's advertised SIMD flags."""
    def setUp(self):
        return
    def tearDown(self):
        return
    @unittest.skipIf('Linux' not in platform.system(),
                     'Skipping as we only have it set up for Linux at present.')
    def test_get_alignment(self):
        # avx implies 32-byte alignment, sse 16-byte, otherwise 1.
        cpus_info = get_cpus_info()
        for each_cpu in cpus_info:
            if 'avx' in each_cpu['flags']:
                self.assertTrue(pyfftw.simd_alignment == 32)
            elif 'sse' in each_cpu['flags']:
                self.assertTrue(pyfftw.simd_alignment == 16)
            else:
                self.assertTrue(pyfftw.simd_alignment == 1)
# Suite definitions consumed by run_test_suites below.
test_cases = (
    UtilsTest,)
test_set = None
if __name__ == '__main__':
    run_test_suites(test_cases, test_set)
|
from typing import Union, List
from collections import Iterable
from .filters import FilterCollection, FilterCondition
from .types import *
class Handler:
    """Base update handler: constructing an instance registers *func* on
    the subclass's shared `handlers` set; run() dispatches an update to
    every registered handler."""
    def __init__(self, func):
        self.func = func
        self.handlers.add(func)

    def run(self, update):
        for handler in self.handlers:
            handler(update)
class onUpdate(Handler):
"""Response decorator for all Updates"""
handlers = set()
attr_name = 'update_id'
class onMessage(Handler):
"""Response decorator for Messages"""
handlers = set()
attr_name = 'message'
class onEditedMessage(Handler):
    """Decorator registering a callback for edited messages."""
    attr_name = 'edited_message'
    handlers = set()
class onMyChatMember(Handler):
    """Decorator registering a callback for chat-member activity."""
    attr_name = 'my_chat_member'
    handlers = set()
# Registry of every handler class, in dispatch order.
ALL_HANDLERS = [onUpdate, onMessage, onEditedMessage, onMyChatMember]
'''
Given an integer n, compute the next bigger permutation of its digits.
If n is already its biggest permutation, wrap around to the smallest one.

case 1: n = 5342310 -> 5343012
case 2: n = 543321  -> 123345
'''


def next_bigger(n):
    """Return the next-bigger digit permutation of ``n`` as a string.

    If the digits of ``n`` are already in non-increasing order (the biggest
    permutation), the smallest permutation is returned instead.  A string is
    returned so that a leading zero in the wrapped case (e.g. 510 -> '015')
    is preserved, matching the script's printed output.
    """
    a = list(map(int, str(n)))

    # Find pivot i: the rightmost index with a[i] < a[i+1]; everything after
    # i is a non-increasing suffix.  Guard with i >= 0 -- a fully decreasing
    # number must stop at i == -1 instead of wrapping around via negative
    # indexing (the original `if i == 0` check missed this and also
    # mis-fired on legitimate pivots at index 0, e.g. n = 132).
    i = len(a) - 2
    while i >= 0 and a[i] >= a[i + 1]:
        i -= 1

    if i < 0:
        # Already the biggest permutation: wrap to the smallest.
        a.reverse()
        return ''.join(map(str, a))

    # Scan the suffix from the RIGHT for the smallest digit > a[i].
    # (Scanning left-to-right picks the largest such digit in a decreasing
    # suffix and overshoots, e.g. 132 -> 312 instead of the correct 213.)
    j = len(a) - 1
    while a[j] <= a[i]:
        j -= 1
    a[i], a[j] = a[j], a[i]

    # The suffix is still non-increasing; reverse it to minimise the tail.
    a[i + 1:] = reversed(a[i + 1:])
    return ''.join(map(str, a))


if __name__ == '__main__':
    n = 5342310  # case 1
    # n = 543321  # case 2
    print(next_bigger(n))
# Minimal attribute-assignment fixture: an empty class whose instance gets a
# dict attribute.  The commented-out statements below (and the `del [1]` that
# follows) are invalid Python -- presumably this is a parser/analyzer test
# file exercising illegal delete/assignment targets; TODO confirm.
class Foo:
    pass
f = Foo()
f.bar = {}
#f.bar("A")
#del f.bar("A")
#f.bar("A") = 1
del [1] |
class Reverse:
    """Iterator yielding the items of a sequence from last to first."""

    def __init__(self, data):
        # ``index`` counts down; it always points one past the next item.
        self.data = data
        self.index = len(data)

    def __iter__(self):
        # The object is its own iterator (single pass).
        return self

    def __next__(self):
        if self.index <= 0:
            raise StopIteration
        self.index -= 1
        return self.data[self.index]
if __name__ == '__main__':
    # Demo: print the characters of 'Spam' back to front, one per line.
    demo = Reverse('Spam')
    for letter in demo:
        print(letter)
# Expected output:
#   m
#   a
#   p
#   S
|
from contextlib import contextmanager
from unittest.mock import MagicMock
from glue import core
from glue.core.application_base import Application
from glue.tests.helpers import make_file
@contextmanager
def simple_catalog():
    """Context manager yielding a temporary two-column CSV data file.

    The file contains a ``#a, b`` header and two data rows; ``make_file``
    presumably yields its path and removes it on exit -- confirm against
    ``glue.tests.helpers.make_file``.
    """
    with make_file(b'#a, b\n1, 2\n3, 4', '.csv') as result:
        yield result
def simple_session():
    """Build a ``core.Session`` for tests.

    The session gets a fresh DataCollection, Hub and CommandStack plus a
    mocked Application, and the command stack is linked back to the session.
    """
    data = core.data_collection.DataCollection()
    message_hub = core.hub.Hub()
    session = core.Session(data_collection=data, hub=message_hub,
                           application=MagicMock(Application),
                           command_stack=core.CommandStack())
    # The command stack needs a back-reference to its owning session.
    session.command_stack.session = session
    return session
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.