"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from extensions.ops.aten import ATen
from mo.front.extractor import FrontExtractorOp
from mo.front.onnx.extractors.utils import onnx_attr
class ATenFrontExtractor(FrontExtractorOp):
op = 'ATen'
enabled = True
@classmethod
def extract(cls, node):
mode = onnx_attr(node, 'mode', 'i', default=1)
operator = onnx_attr(node, 'operator', 's').decode()
ATen.update_node_stat(node, {'operator': operator, 'mode': mode})
return cls.enabled
|
from tests.chart_tests.helm_template_generator import render_chart
import jmespath
import pytest
from tests import supported_k8s_versions
@pytest.mark.parametrize(
"kube_version",
supported_k8s_versions,
)
class TestIngress:
def test_astro_ui_deployment(self, kube_version):
docs = render_chart(
kube_version=kube_version,
values={
"astronomer": {
"astroUI": {
"resources": {
"requests": {"cpu": "100m", "memory": "256Mi"},
"limits": {"cpu": "500m", "memory": "1024Mi"},
}
}
}
},
show_only=["charts/astronomer/templates/astro-ui/astro-ui-deployment.yaml"],
)
assert "Deployment" == jmespath.search("kind", docs[0])
assert "release-name-astro-ui" == jmespath.search("metadata.name", docs[0])
assert "astro-ui" == jmespath.search(
"spec.template.spec.containers[0].name", docs[0]
)
assert "500m" == jmespath.search(
"spec.template.spec.containers[0].resources.limits.cpu", docs[0]
)
|
version = "1.0.0.dev"
date = "2015-January-8"
author = "Jeremy Schulman, @nwkautomaniac"
|
"""
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Author: Jun Zhu <jun.zhu@xfel.eu>
Copyright (C) European X-Ray Free-Electron Laser Facility GmbH.
All rights reserved.
"""
import time
from collections import deque
import numpy as np
from PyQt5.QtCore import QTimer
from extra_foam.gui import mkQApp
from extra_foam.gui.plot_widgets import ImageViewF
app = mkQApp()
class BenchmarkImageViewSpeed:
def __init__(self):
self._dt = deque(maxlen=60)
self._timer = QTimer()
self._timer.timeout.connect(self.update)
self._data = np.random.normal(size=(50, 1024, 1280))
self._prev_t = None
self._count = 0
self._view = ImageViewF()
self._view.show()
def start(self):
self._prev_t = time.time()
self._timer.start(0)
def update(self):
self._view.setImage(self._data[self._count % 10])
self._count += 1
now = time.time()
self._dt.append(now - self._prev_t)
self._prev_t = now
fps = len(self._dt) / sum(self._dt)
self._view.setTitle(f"{fps:.2f} fps")
app.processEvents() # force complete redraw for every plot
if __name__ == '__main__':
bench = BenchmarkImageViewSpeed()
bench.start()
app.exec_()
|
from board import TicTacToeBoard
from move import BoardMove
class Game:
X = "X"
O = "O"
def __init__(self):
self._board = TicTacToeBoard()
self._player = self.X
self._running = True
self._has_winner = False
self._moves = []
@property
def running(self):
return self._running
@property
def has_winner(self):
return self._has_winner
@property
def board(self):
return self._board
@property
def player(self):
return self._player
@property
def opponent(self):
if self.player == self.X:
return self.O
return self.X
@property
def winning_move(self):
return [self.player] * self.board.BOARD_SIZE
def count_empty_squares(self):
return (self.board.BOARD_SIZE ** 2) - len(self._moves)
def change_turn(self):
if self.running:
self._player = self.opponent
def play_turn(self, move):
if self.running:
if self.valid_move(move):
self._moves.append(move)
self.board.make_move(move, self.player)
self.check_game_over()
self.change_turn()
def check_game_over(self):
if self.is_game_won():
self.set_game_won()
self.end_game()
if self.board_filled():
self.end_game()
def board_filled(self):
return self.board.full_board()
def at(self, position):
return self.board.at(position)
def has_moves_on_board(self):
return bool(self._moves)
    def last_move(self):
        if self.has_moves_on_board():
            return self._moves[-1]
def undo_move(self, move):
self.board.clear_square(move)
def valid_move(self, move):
return self.board.valid_move(move)
def is_game_won(self):
if self.check_rows() or self.check_columns() or self.check_diagonal() or self.check_antidiagonal():
return True
return False
def set_game_won(self):
self._has_winner = True
self.winner = self.player
def check_rows(self):
for i in range(self.board.BOARD_SIZE):
if self.winning_move == self.board.get_row(i):
return True
return False
def check_columns(self):
for i in range(self.board.BOARD_SIZE):
if self.winning_move == self.board.get_column(i):
return True
return False
def check_diagonal(self):
return self.winning_move == self.board.get_diagonal()
def check_antidiagonal(self):
return self.winning_move == self.board.get_antidiagonal()
def all_possible_moves(self):
moves = []
for i in range(self.board.BOARD_SIZE):
for j in range(self.board.BOARD_SIZE):
move = BoardMove(i, j)
if self.board.is_empty(move):
moves.append(move)
return moves
def end_game(self):
self._running = False
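# Usage sketch, assuming a standard 3x3 TicTacToeBoard and BoardMove(row, column)
# as used in all_possible_moves() above.
if __name__ == '__main__':
    game = Game()
    # X fills the top row while O plays on the middle row.
    for move in [BoardMove(0, 0), BoardMove(1, 0), BoardMove(0, 1),
                 BoardMove(1, 1), BoardMove(0, 2)]:
        game.play_turn(move)
    print("game over:", not game.running, "| winner found:", game.has_winner)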
|
#!/usr/bin/env python
from __future__ import print_function
import random
import time
from progress.bar import (Bar, ChargingBar, FillingSquaresBar, FillingCirclesBar, IncrementalBar, PixelBar, ShadyBar)
from progress.spinner import (Spinner, PieSpinner, MoonSpinner, LineSpinner, PixelSpinner)
from progress.counter import Counter, Countdown, Stack, Pie
def sleep():
t = 0.01
t += t * random.uniform(-0.1, 0.1) # Add some variance
time.sleep(t)
for bar_cls in (Bar, ChargingBar, FillingSquaresBar, FillingCirclesBar):
suffix = '%(index)d/%(max)d [%(elapsed)d / %(eta)d / %(eta_td)s]'
bar = bar_cls(bar_cls.__name__, suffix=suffix)
for i in bar.iter(range(200)):
sleep()
for bar_cls in (IncrementalBar, PixelBar, ShadyBar):
suffix = '%(percent)d%% [%(elapsed_td)s / %(eta)d / %(eta_td)s]'
bar = bar_cls(bar_cls.__name__, suffix=suffix)
for i in bar.iter(range(200)):
sleep()
for spin in (Spinner, PieSpinner, MoonSpinner, LineSpinner, PixelSpinner):
for i in spin(spin.__name__ + ' ').iter(range(100)):
sleep()
print()
for singleton in (Counter, Countdown, Stack, Pie):
for i in singleton(singleton.__name__ + ' ').iter(range(100)):
sleep()
print()
bar = IncrementalBar('Random', suffix='%(index)d')
for i in range(100):
bar.goto(random.randint(0, 100))
sleep()
bar.finish()
|
import park
from park.core import Env, Space
from park.envs.multi_dim_index.params import Params as params
from park.envs.multi_dim_index.spaces import ActionSpace, DataObsSpace, QueryObsSpace
from park.envs.multi_dim_index.config import Action, DataObs, QueryObs, Query
from park.envs.multi_dim_index.gen_osm_queries import QueryGen
from park.envs.multi_dim_index.monotonic_rmi2 import MonotonicRMI
import numpy as np
from park.spaces.tuple_space import Tuple
import wget
import os
import random
import stat
import sys
import subprocess
from timeit import default_timer as timer
class MultiDimIndexEnv(Env):
metadata = {'env.name': 'multi_dim_index'}
# Rewards are reported as throughput (queries per second)
reward_range = (0, 1e6)
action_space = ActionSpace()
observation_space = Tuple((DataObsSpace, QueryObsSpace))
def __init__(self):
datafile = params.DATASET_PATH
if not os.path.exists(datafile):
dr = os.path.split(datafile)[0]
if not os.path.isdir(dr):
os.makedirs(dr)
print('Downloading dataset...')
wget.download(params.DATA_DOWNLOAD_URL, out=datafile)
# Newline because wget doesn't print it out
print('')
binary = params.BINARY_PATH
if not os.path.exists(binary):
dr = os.path.split(binary)[0]
if not os.path.isdir(dr):
os.makedirs(dr)
print('Downloading binary...')
wget.download(params.BINARY_DOWNLOAD_URL, out=binary)
os.chmod(binary, stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
print('')
self.gen_data_summary()
self.step_count = 0
self.query_generator = None
def parse_cmd_output(self, output):
lines = output.split('\n')
times = []
for line in lines:
if line.startswith('Query'):
time = int(line.split(':')[1].strip())
times.append(time)
return times
def step(self, action):
assert self.action_space.contains(action)
layout_filename = park.__path__[0] + "/envs/multi_dim_index/mdi_layout.dat"
action.tofile(layout_filename)
print('Generating next query workload...')
start = timer()
new_queries = []
for _ in range(params.QUERIES_PER_STEP):
q = self.query_generator.random_query()
new_queries.append(q)
query_filename = park.__path__[0] + "/envs/multi_dim_index/queries.bin"
np.array(new_queries).tofile(query_filename)
end = timer()
print('Generating query workload took', end-start, 's')
print('Running range query workload...')
start = timer()
cmd = [params.BINARY_PATH, "--dataset=%s" % params.DATASET_PATH,
"--workload=%s" % query_filename, "--projector=%s" % layout_filename,
"--folder=%s" % params.DATA_SUMMARY_DIR]
print(' '.join(cmd))
outfile = 'cmd_output.txt'
done = subprocess.run(cmd, stdout=open(outfile, 'w'), stderr=subprocess.STDOUT, encoding='utf-8')
if done.returncode != 0:
raise Exception('Query binary did not finish successfully')
        times = self.parse_cmd_output(open(outfile).read())
if len(times) != len(new_queries):
raise Exception('Results from binary are incomplete')
end = timer()
print('Running range query workload took', end-start, 's')
reward = 1./np.mean(times)
obs = (DataObs(params.DATASET_PATH), QueryObs(new_queries))
self.step_count += 1
# The query times are given as information.
return obs, reward, self.step_count >= params.STEPS_PER_EPOCH, {"times": times}
def reset(self):
self.step_count = 0
# Restart the query generator with a new random configuration.
print('Initializing OSM Query generator...')
start = timer()
self.query_generator = QueryGen(params.DATASET_PATH)
end = timer()
print('Initializing OSM Query generator took', end-start, 's')
def seed(self, seed=None):
if seed is not None:
self.query_generator.seed(seed)
random.seed(seed+5)
# Generates a coarse summary of each data dimension, which the indexer uses to divvy up data
# into columns.
def gen_data_summary(self):
data = np.fromfile(params.DATASET_PATH, dtype=np.int64).reshape(-1, params.NDIMS)
for ix in range(params.NDIMS):
filename = f'{params.DATA_SUMMARY_DIR}/dim_{ix}_cdf.dat'
print(f'Generating CDF for dimension {ix}')
# Generate a CDF for that dimension
dim_data = np.sort(data[:, ix]).reshape(-1, 1)
dim_data_unique, cdf_range_unique = self.unique_cdf(dim_data, mode='bottom')
expert_sizes = self.sizes_from_uniques(dim_data_unique, [100, 1000])
cdf = MonotonicRMI(expert_sizes, last_layer_monotonic=True)
cdf.fit(dim_data_unique, cdf_range_unique, verbose=False)
cdf.dump(filename)
# When there are multiple points with the same value:
# - 'middle': the CDF of a point should be the location of the middle point
# - 'top': the CDF should be the location of the last of the points with the same value
# - 'bottom': the CDF should be the location of the first point
def unique_cdf(self, xs, mode='middle'):
uq, inds, counts = np.unique(xs, return_inverse=True, return_counts=True)
cdf = None
if mode == 'top':
cum_counts = np.cumsum(counts).astype(float) / len(xs)
cdf = cum_counts[inds]
xs = np.insert(xs, [0], [xs.min()-1], axis=0)
cdf = np.insert(cdf, 0, 0.0)
elif mode == 'middle':
cum_counts = (np.cumsum(counts) - (counts+1)/2).astype(float) / len(xs)
cdf = cum_counts[inds]
elif mode == 'bottom':
cum_counts = (np.cumsum(counts) - counts).astype(float) / len(xs)
cdf = cum_counts[inds]
cdf = np.insert(cdf, len(xs), 1.0)
xs = np.insert(xs, [len(xs)], [xs.max()+1], axis=0)
return xs, cdf
def sizes_from_uniques(self, data, locator_experts):
uq = np.unique(data)
if len(np.unique(uq)) < locator_experts[-1]:
return [1, int(np.sqrt(len(uq))), len(uq)]
return locator_experts
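# Worked example for unique_cdf: the method does not use `self`, so it can be
# exercised on the class with a tiny array. The expected values follow directly
# from the cumulative-count arithmetic in the method above.
if __name__ == '__main__':
    xs_demo = np.array([1, 1, 2, 3], dtype=np.int64)
    xs_b, cdf_b = MultiDimIndexEnv.unique_cdf(None, xs_demo, mode='bottom')
    # 'bottom' assigns each value the CDF of its first occurrence and appends a
    # sentinel one past the max: xs_b -> [1, 1, 2, 3, 4], cdf_b -> [0.0, 0.0, 0.5, 0.75, 1.0]
    print(xs_b, cdf_b)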
|
from . import api
v1 = [
(r"positioning/locate", api.LocateView, "positioning-locate"),
(r"positioning/beacons", api.BeaconViewSet, "positioning-beacons"),
]
|
"""some objects and functions supporting html actions and routes"""
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.spatial import distance
# from sqlalchemy import func, distinct
# from sqlalchemy.sql import expression #(.exists, .select, ...)
from .spotify_client import *
# unpickle a trained kmeans algorithm and cluster-distances
pickled_kmeans = r"kmeans_pipe.sav"
pickled_index = r"df_index.sav"
pickled_locs = r"song_space_locs.sav"
kmeans_pipe = pickle.load(open(pickled_kmeans, 'rb'))
df_index = pickle.load(open(pickled_index, 'rb'))
song_space_locs = pickle.load(open(pickled_locs, 'rb'))
def suggest_ids(song_name, artist_name, count=100):
"""Compares a track to ~440,000 others, based on 13 numeric audio features, and returns the spotify_ids of 35 songs with similar cluster-distance coordinates; it would be cool if a button press would display the next closest set. It would be cooler if matplotlib displayed a 3D plot, with 3 drop-down menus for choosing any 3 features (of 13) for plot axes (or a 3D tSNE plot, not with audio features but with projections to abstract 3D space); and if the color of input song were bright color, similar to neighbors displayed in table, but different from the faded grey others"""
song_id, artist_id = retrieve_spotify_ids(song_name, artist_name)
features = retrieve_audio_features(song_id)
feats = ['danceability','energy','key','loudness','mode','speechiness','acousticness','instrumentalness','liveness','valence','tempo','duration_ms','time_signature']
model_input = [features[0][feat] for feat in feats]
song_space_base = kmeans_pipe.transform([model_input])
dists = distance.cdist(song_space_locs, song_space_base, 'cityblock')
    dists = pd.DataFrame(dists, index=df_index).sort_values(by=0)[:count]  # keep the `count` closest
spotify_ids = dists.index
return spotify_ids
def relevant_genres(tracks):
artist_names = [track.artist_name for track in tracks]
genres_nested_lists = [retrieve_genres(art) for art in artist_names]
genre_list = [g for g_list in genres_nested_lists for g in g_list]
return genre_list
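# Usage sketch, assuming the pickled model files above exist on disk and that
# spotify_client (imported with *) provides retrieve_spotify_ids and
# retrieve_audio_features as used in suggest_ids.
if __name__ == '__main__':
    ids = suggest_ids("Bohemian Rhapsody", "Queen", count=10)
    print(list(ids))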
|
import logging
import os
from logging import handlers
from fastapi import Depends, FastAPI, Request
from fastapi.logger import logger
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from source.config import LOG_LOCATION, STATIC_DIR
from source.dependencies import get_current_active_user
from source.note import note
from source.point import point
from source.summarize import summarize
from source.symbol import symbol
from source.users import users
from source.util.util_base.db import create_db_pool, get_multi_data
# log_filename = os.path.join(LOG_LOCATION, "future_picture.log")
# logging.basicConfig(format="%(asctime)s %(levelname)s:%(message)s",
# handlers=[handlers.RotatingFileHandler(log_filename, encoding='utf-8', maxBytes=1073741824, backupCount=20)],
# level=logging.INFO,
# datefmt="%Y-%m-%d %H:%M:%S")
# base_logger = logging.getLogger()
# logger.handlers = base_logger.handlers
# logger.setLevel(base_logger.level)
# logger.propagate = False
logging.basicConfig(format="%(asctime)s %(levelname)s:%(message)s",
handlers=[handlers.RotatingFileHandler(os.path.join(LOG_LOCATION, "app.log"), encoding='utf-8', maxBytes=1073741824, backupCount=20)],
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S")
log_filename = os.path.join(LOG_LOCATION, "future_picture.log")
handler = handlers.RotatingFileHandler(log_filename, encoding='utf-8', maxBytes=1073741824, backupCount=5)
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(levelname)s:%(message)s", datefmt="%Y-%m-%d %H:%M:%S")
handler.setFormatter(formatter)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.propagate = False
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.mount("/front", StaticFiles(directory=STATIC_DIR), name="front")
app.include_router(users)
# app.include_router(symbol, dependencies=[Depends(get_current_active_user)])
# app.include_router(point, dependencies=[Depends(get_current_active_user)])
# app.include_router(summarize, dependencies=[Depends(get_current_active_user)])
# app.include_router(manual_data, dependencies=[Depends(get_current_active_user)])
# app.include_router(note, dependencies=[Depends(get_current_active_user)])
app.include_router(symbol)
app.include_router(point)
app.include_router(summarize)
app.include_router(note)
db_pool = None
@app.exception_handler(ValueError)
async def value_error_handler(request, exc):
    return JSONResponse(str(exc), status_code=500)
@app.exception_handler(TypeError)
async def type_error_handler(request, exc):
    return JSONResponse(str(exc), status_code=500)
@app.exception_handler(IndexError)
async def index_error_handler(request, exc):
    return JSONResponse(str(exc), status_code=500)
@app.middleware("http")
async def db_session_middleware(request: Request, call_next):
async with db_pool.acquire() as db_conn:
# async with db_conn.cursor() as cursor:
request.state.db_conn = db_conn
response = await call_next(request)
return response
@app.on_event("startup")
async def startup():
    # TODO: no better place found for db_pool yet, so use a global for now
global db_pool
db_pool = await create_db_pool()
# TODO: the shutdown handler does not seem to run; cause unknown for now
@app.on_event("shutdown")
async def shutdown():
await db_pool.close()
await db_pool.wait_closed()
@app.get("/test")
async def read_notes(request: Request):
db_conn = request.state.db_conn
r = await get_multi_data(db_conn, "SELECT * from s_info where ts_code in %s;", [["603559.SH", "000001.SZ"]])
logger.info("sssssssssssss")
return r
@app.get("/")
async def index():
return FileResponse(os.path.join(STATIC_DIR, "index.html"))
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=6677)
|
from abc import ABC, abstractmethod
import numpy as np
class Cost(ABC):
'This is an abstract class for Cost functions'
    def __init__(self, name="Default Cost"):
        self.name = name
        self.cost = None  # populated by calculate(); pre-set so __str__ is safe before a call
        self.dA = None    # populated by derivative()
@abstractmethod
def calculate(self, Y, yhat):
'''Y is data, yhat is prediction'''
self.cost = None
@abstractmethod
def derivative(self, Y, yhat):
self.dA = None
def __str__(self):
return f'{self.__class__.__name__} cost is {self.cost}'
class MSE(Cost):
'Mean Squared Error Cost'
def __init__(self):
super().__init__("MSE")
def calculate(self, Y, Yhat):
'''Returns normalized J = sum(|| Yhat - Y || ^ 2) / 2m'''
assert (Y.shape == Yhat.shape)
self.cost = np.square(Y - Yhat).mean()
return self.cost
def derivative(self, Y, Yhat):
'''Returns J' = Yhat - Y'''
self.dA = Yhat - Y
return self.dA
class CrossEntropy(Cost):
'''Loss function for binary classification'''
def __init__(self):
super().__init__(name="CrossEntropy")
def calculate(self, Y, Yhat):
'''Math:
-Y * log(Yhat) - (1 - Y) * log(1 - Yhat)
'''
self.cost = ((0 - Y) * np.log(Yhat) - (1 - Y) * np.log(1 - Yhat)).mean()
return self.cost
def derivative(self, Y, Yhat):
assert Y.shape == Yhat.shape
self.dA = (0 - Y) / Yhat + (1 - Y) / (1 - Yhat)
return self.dA
class CategoricalCrossEntropy(Cost):
'''Loss function for multi-class classification'''
def __init__(self):
super().__init__(name="CategoricalCrossEntropy")
def calculate(self, Y, Yhat):
'''Math:
-summation{yi ln( yhat i)}
'''
assert (Y.shape == Yhat.shape)
self.cost = -np.sum( Y * np.log(Yhat) , axis=0 ).mean()
return self.cost
def derivative(self, Y, Yhat):
'''Note: The return of this function is NOT a derivative.
It is assumed that the derivative is going to be fed into Softmax layer
during the propagation. Since derivative of the softmax layer
dZ = Yhat - Y, it's just easier to return that.
'''
assert Y.shape == Yhat.shape
self.dA = Yhat - Y
return self.dA
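# Usage sketch: exercise the scalar costs on tiny arrays; the numbers follow from
# the formulas in the docstrings above.
if __name__ == '__main__':
    Y = np.array([[1.0], [0.0]])
    Yhat = np.array([[0.9], [0.2]])
    mse = MSE()
    print(mse.calculate(Y, Yhat))         # (0.01 + 0.04) / 2 = 0.025
    bce = CrossEntropy()
    print(bce.calculate(Y, Yhat))         # mean binary cross-entropy, roughly 0.164
    print(bce.derivative(Y, Yhat).shape)  # same shape as Y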
|
#!/usr/bin/env python
from .wrapper_zakharov import Zakharov
|
import itertools
import time
def retry(delays=(0, 1, 5, 30, 180, 600, 3600),
exception=Exception,
report=lambda *args: None):
def wrapper(function):
def wrapped(*args, **kwargs):
problems = []
for delay in itertools.chain(delays, [ None ]):
try:
return function(*args, **kwargs)
except exception as problem:
problems.append(problem)
if delay is None:
report("retryable failed definitely:", problems)
raise
else:
report("retryable failed:", problem,
"-- delaying for %ds" % delay)
time.sleep(delay)
return wrapped
return wrapper
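# Usage example: retry a flaky callable. With two delays configured, the first two
# failures are retried and the third call succeeds.
if __name__ == '__main__':
    attempts = {'n': 0}

    @retry(delays=(0, 0.1), exception=ValueError, report=print)
    def flaky():
        attempts['n'] += 1
        if attempts['n'] < 3:
            raise ValueError('not yet')
        return 'ok after %d attempts' % attempts['n']

    print(flaky())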
|
"""
Goals of this part of the examples:
1. Learn how to analyze the model of your energy system
2. Improve your `SimulationAPI` knowledge
3. Improve your skill-set on `TimeSeriesData`
4. Generate some measured data to later use in a calibration
"""
# Start by importing all relevant packages
import pathlib
import matplotlib.pyplot as plt
# Imports from ebcpy
from ebcpy import DymolaAPI, TimeSeriesData
def main(
aixlib_mo,
cd=None,
with_plot=True
):
"""
Arguments of this example:
:param str aixlib_mo:
Path to the package.mo of the AixLib.
This example was tested for AixLib version 1.0.0.
:param str cd:
Path in which to store the output.
        Default is the examples/results folder.
:param bool with_plot:
Show the plot at the end of the script. Default is True.
"""
# General settings
if cd is None:
cd = pathlib.Path(__file__).parent.joinpath("results")
else:
cd = pathlib.Path(cd)
example_path = pathlib.Path(__file__).parent
aixcalibuha_mo = example_path.joinpath("model", "AixCaliBuHAExamples.mo")
# ######################### System analysis ##########################
    # The best way to analyze the model we later want to calibrate is to either
    # pause here (set a debug point) or open the models in Dymola separately.
# Click through the system and subsystem to understand what happens in the model.
# As you may have guessed, the analysis of an energy system can be quite complex
    # and is thus hard to automate. Before using AixCaliBuHA, you should understand
# what happens in your system. If you have questions regarding modeling assumptions,
# ask e.g. the model developers of the AixLib or the IBPSA.
# %% Setup the Dymola-API:
dym_api = DymolaAPI(
model_name="AixCaliBuHAExamples.HeatPumpSystemCalibration",
cd=cd,
packages=[
aixlib_mo,
aixcalibuha_mo
],
show_window=True,
equidistant_output=False
)
print("Pausing for analysis. Set the break point here if you like!")
# ######################### Data generation ##########################
# We want to exemplify the process of getting experimental data using
# the model we later want to calibrate.
# This is a good example for two reasons:
# 1. You really know the optimal parameters
# 2. We don't have to deal with measurement noise etc.
    # For this example, we simulate 1 h with a 10 s output interval.
# For further simulation help, check out the ebcpy examples.
dym_api.set_sim_setup({
"stop_time": 3600,
"output_interval": 10
})
file_path = dym_api.simulate(
return_option="savepath"
)
# ######################### Data analysis ##########################
# Now let's analyze the data we've generated.
# Open the file first and extract variables of interest.
# We want to match electrical power consumption (Pel) and room comfort (TAir)
# in this example.
# As an input of the model, TDryBulSource.y
# represents the outdoor air temperature
tsd = TimeSeriesData(file_path)
tsd = tsd[["Pel", "TAir", "TDryBulSource.y"]]
# Check the frequency of the data:
print("Simulation had index-frequency of %s with "
"standard deviation of %s" % tsd.frequency)
# Due to state events, our data is not equally sampled.
# To later match the simulation data with a fixed output_interval,
# we thus have to process the data further.
# To do this, we have the function 'clean_and_space_equally'.
# It only works on datetime indexes, hence we convert first:
# Note: Real measured data would already contain DateTimeIndex anyways.
tsd.to_datetime_index()
# Save a copy to check if our resampling induces data loss:
tsd_reference = tsd.copy()
# Apply function
tsd.clean_and_space_equally(desired_freq="10s")
print("Simulation now has index-frequency of %s with "
"standard deviation of %s" % tsd.frequency)
# Let's check if the sampling changed our measured data:
plt.plot(tsd_reference['TAir'], color="blue", label="Reference")
plt.plot(tsd['TAir'], color="red", label="Resampled")
plt.legend()
if with_plot:
plt.show()
# ######################### Data saving ##########################
# In order to use this data in the other examples, we have to save it.
tsd_inputs = tsd[["TDryBulSource.y"]]
tsd_measurements = tsd[["Pel", "TAir"]]
tsd_inputs.save(example_path.joinpath("data", "measured_input_data.hdf"), key="example")
tsd_measurements.save(example_path.joinpath("data", "measured_target_data.hdf"), key="example")
print("Saved data under", example_path.joinpath("data"))
if __name__ == '__main__':
# TODO-User: Change the AixLib path!
main(
aixlib_mo=r"D:\02_workshop\AixLib\AixLib\package.mo",
)
|
#!/usr/bin/python3
import argparse
import os
import utils
import subprocess
from multiprocessing import Pool
parser = argparse.ArgumentParser(description='compute adv transfer strategy')
parser.add_argument('--dir', help='data dir, required', type=str, default=None)
parser.add_argument('-c', '--config', help='config file, default config.json', type=str, default='config.json')
parser.add_argument('-j', help='number of parallel process', type=int, default=1)
args = parser.parse_args()
_dir = args.dir
_threads = args.j
config = utils.load_json(os.path.join(_dir, args.config))
tasks = []
task_arg = config['task_arg']
cur_task_id = 0
for cur_mthd in config['mthd']:
cur_mthd_name = cur_mthd['name']
cur_mthd_transfer = cur_mthd['transfer']
cur_mthd_diff = cur_mthd['diff']
cur_mthd_config = []
for cur_strategy in config['strategy']:
cur_mthd_config.append({
"name": cur_strategy['name'],
"transfer": cur_strategy['transfer'] % cur_mthd_transfer,
"diff": cur_strategy['diff'] % cur_mthd_diff
})
os.makedirs(os.path.join(_dir, cur_mthd_name), exist_ok=True)
utils.save_json(os.path.join(_dir, cur_mthd_name, 'config.json'), cur_mthd_config, indent=4)
dest = os.path.join(_dir, cur_mthd_name + '-l2_mn.pdf')
src = os.path.join(cur_mthd_name, cur_mthd_name + '-l2_mn.pdf')
if os.path.exists(dest):
os.unlink(dest)
os.symlink(src, dest)
tasks.append({
'type': "strategy",
'name': cur_mthd_name,
'task': cur_task_id,
'args': ['python3', 'compute_adv_diff_bin_vs_trans.py',
'--dir=' + os.path.join(_dir, cur_mthd_name), '--output=' + cur_mthd_name, task_arg]
})
cur_task_id += 1
def worker(config):
p = subprocess.Popen(' '.join(config['args']), stderr=None, stdout=None, shell=True)
p.wait()
print('Finish(', p.returncode, '):', config['task'], config['type'], config['name'])
def run_task(func, tasks):
pool = Pool(_threads)
for i in tasks:
pool.apply_async(func, args=(i,))
pool.close()
pool.join()
run_task(worker, tasks)
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Mesh TensorFlow layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensor2tensor.layers import common_layers
from tensor2tensor.mesh_tensorflow import mesh_tensorflow as mtf
from tensor2tensor.mesh_tensorflow import mtf_layers
from tensor2tensor.mesh_tensorflow import placement_mesh_impl
import tensorflow as tf
class MtfLayersTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(4, True),
(8, False),
)
def testDense(self, units, use_bias):
batch = 2
channels = 3
inputs = tf.random_normal([batch, channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
channels_dim = mtf.Dimension("channels", channels)
depth_dim = mtf.Dimension("depth", units)
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf_layers.dense(mtf_inputs,
output_dim=depth_dim,
reduced_dims=[channels_dim],
activation=mtf.relu,
use_bias=use_bias)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
expected_outputs = tf.keras.layers.Dense(units=units,
activation=tf.nn.relu,
use_bias=use_bias)(inputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(tf_group)
actual, expected = sess.run([actual_outputs, expected_outputs])
self.assertEqual(actual.shape, expected.shape)
def testLayerNorm(self):
batch = 2
channels = 3
inputs = tf.random_normal([batch, channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
channels_dim = mtf.Dimension("channels", channels)
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf_layers.layer_norm(mtf_inputs,
dim=channels_dim)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
expected_outputs = common_layers.layer_norm(inputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(tf_group)
actual, expected = sess.run([actual_outputs, expected_outputs])
self.assertEqual(actual.shape, expected.shape)
def testWeightsNonzero(self):
inputs = tf.constant([[3, 1, 0], [1, 0, 0]])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", inputs.shape.as_list()[0])
channels_dim = mtf.Dimension("channels", inputs.shape.as_list()[1])
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf_layers.weights_nonzero(mtf_inputs)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
expected_outputs = common_layers.weights_nonzero(inputs)
tf_group = lowering.copy_masters_to_slices()
with self.test_session() as sess:
sess.run(tf_group)
actual, expected = sess.run([actual_outputs, expected_outputs])
self.assertAllEqual(actual, expected)
def testDenseReluDense(self):
batch = 2
channels = 3
hidden = 5
inputs = tf.random_normal([batch, channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
channels_dim = mtf.Dimension("channels", channels)
hidden_dim = mtf.Dimension("hidden", hidden)
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf_layers.dense_relu_dense(mtf_inputs,
hidden_channels=hidden_dim)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(tf_group)
actual = sess.run(actual_outputs)
self.assertEqual(actual.shape, inputs.shape)
@parameterized.parameters(
(4, 2),
)
def testMaskedLocalAttention1D(self, kv_channels, heads):
batch = 2
length_q = 16
length_m = 16
channels = 3
query = tf.random_normal([batch, length_q, channels])
memory = tf.random_normal([batch, length_m, channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
length_q_dim = mtf.Dimension("length_q", length_q)
length_m_dim = mtf.Dimension("length_m", length_m)
channels_dim = mtf.Dimension("channels", channels)
kv_channels_dim = mtf.Dimension("kv_channels", kv_channels)
heads_dim = mtf.Dimension("heads", heads)
mtf_query = mtf.import_tf_tensor(
mesh, query,
shape=mtf.Shape([batch_dim, length_q_dim, channels_dim]))
mtf_memory = mtf.import_tf_tensor(
mesh, memory,
shape=mtf.Shape([batch_dim, length_m_dim, channels_dim]))
mtf_outputs = mtf_layers.masked_local_attention_1d(
mtf_query,
mtf_memory,
kv_channels=kv_channels_dim,
heads=heads_dim,
block_length=2)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(tf_group)
actual = sess.run(actual_outputs)
self.assertEqual(actual.shape, (batch, length_q, channels))
@parameterized.parameters(
(2, 4, 5, 7, 3, 1),
)
def testDotProductAttention(
self, batch, heads, length_q, length_kv, depth_k, depth_v):
query = tf.random_normal([batch, heads, length_q, depth_k])
key = tf.random_normal([batch, heads, length_kv, depth_k])
value = tf.random_normal([batch, heads, length_kv, depth_v])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
heads_dim = mtf.Dimension("heads", heads)
length_q_dim = mtf.Dimension("length_q", length_q)
length_kv_dim = mtf.Dimension("length_kv", length_kv)
depth_k_dim = mtf.Dimension("depth_k", depth_k)
depth_v_dim = mtf.Dimension("depth_v", depth_v)
mtf_query = mtf.import_tf_tensor(
mesh, query,
shape=mtf.Shape(
[batch_dim, heads_dim, length_q_dim, depth_k_dim]))
mtf_key = mtf.import_tf_tensor(
mesh, key,
shape=mtf.Shape(
[batch_dim, heads_dim, length_kv_dim, depth_k_dim]))
mtf_value = mtf.import_tf_tensor(
mesh, value,
shape=mtf.Shape(
[batch_dim, heads_dim, length_kv_dim, depth_v_dim]))
mtf_outputs = mtf_layers.dot_product_attention(
mtf_query,
mtf_key,
mtf_value,
mask=None)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(tf_group)
actual = sess.run(actual_outputs)
self.assertEqual(actual.shape, (batch, heads, length_q, depth_v))
@parameterized.parameters(
(16, 4),
(32, 8),
)
def testMultiheadAttention(self, kv_channels, heads):
batch = 2
length = 8
channels = 3
query = tf.random_normal([batch, length, channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
length_dim = mtf.Dimension("length", length)
channels_dim = mtf.Dimension("channels", channels)
kv_channels_dim = mtf.Dimension("kv_channels", kv_channels)
heads_dim = mtf.Dimension("heads", heads)
mtf_query = mtf.import_tf_tensor(
mesh, query,
shape=mtf.Shape([batch_dim, length_dim, channels_dim]))
mtf_outputs = mtf_layers.multihead_attention(
mtf_query,
memory_antecedent=None,
mask=None,
kv_channels=kv_channels_dim,
heads=heads_dim)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(tf_group)
actual = sess.run(actual_outputs)
self.assertEqual(actual.shape, query.shape)
if __name__ == "__main__":
tf.test.main()
|
"""
Functions to plot helpful information from the pemfc model. Dataframes referenced
in the plotting routines are established in post_pak_dfs, or loaded separately.
"""
import numpy as np
import cantera as ct
import matplotlib.pyplot as plt
from Shared_Funcs.read_and_write import *
from Shared_Funcs.pemfc_property_funcs import *
from Shared_Funcs.pemfc_transport_funcs import *
def fig_starter(fig_num):
while plt.fignum_exists(fig_num):
fig_num = fig_num +1
return fig_num
def debug_plts(df_t,ca,gdl,cl,tog,save,save_folder):
# Plot solution vector variables vs time to check for divergence issues
# when trying to debug. Tog controls high and low detail.
fig_num = fig_starter(0)
""" GDL water volume fractions """
eps_cols_gdl = []
eps_cols_gdl.extend([col for col in df_t.columns if 'eps_w_gdl' in col])
plt.figure(fig_num)
plt.plot(df_t['Time [s]'],df_t[eps_cols_gdl])
plt.legend(eps_cols_gdl,loc='best')
plt.ylabel('GDL Water Volume Frac [-]')
plt.xlabel('Time, t [s]')
plt.tight_layout()
if save:
save_plot(os.getcwd(),save_folder,'GDL_eps_w_v_Time.png')
fig_num = fig_num +1
""" GDL gas densities """
gdl_gas_cols = [col for col in df_t.columns if 'rho_gdl_k' in col]
for i in range(gdl.d['Ny']):
y_cols = [col for col in gdl_gas_cols if 'y'+str(i) in col]
plt.figure(fig_num)
plt.plot(df_t['Time [s]'],df_t[y_cols])
plt.title('GDL y-node='+str(i))
plt.legend(ca.gas.species_names,loc='best')
plt.ylabel(r'GDL Gas $\rho_k$ [kg/m$^3$]')
plt.xlabel('Time, t [s]')
plt.tight_layout()
if save:
fig_name = 'GDL_gas_densities_v_Time_y'+str(i)+'.png'
save_plot(os.getcwd(),save_folder,fig_name)
fig_num = fig_num +1
if tog == 1:
break
""" Double layer potential """
phi_cols = []
phi_cols.extend([col for col in df_t.columns if 'phi_dl' in col])
plt.figure(fig_num)
plt.plot(df_t['Time [s]'],df_t[phi_cols])
plt.legend(phi_cols,loc='best')
plt.ylabel('Cathode DL Potential [V]')
plt.xlabel('Time, t [s]')
plt.tight_layout()
if save:
save_plot(os.getcwd(),save_folder,'Double_Layer_v_Time.png')
fig_num = fig_num +1
""" CL water volume fractions """
eps_cols_cl = []
eps_cols_cl = [col for col in df_t.columns if 'eps_w_cl' in col]
plt.figure(fig_num)
plt.plot(df_t['Time [s]'],df_t[eps_cols_cl])
plt.legend(eps_cols_cl,loc='best')
plt.ylabel('CL Water Volume Frac [-]')
plt.xlabel('Time, t [s]')
plt.tight_layout()
if save:
save_plot(os.getcwd(),save_folder,'CL_eps_w_v_Time.png')
fig_num = fig_num +1
""" CL gas densities """
cl_gas_cols = [col for col in df_t.columns if 'rho_gas_k' in col]
for i in range(cl.d['Ny']):
y_cols = [col for col in cl_gas_cols if 'y'+str(i) in col]
plt.figure(fig_num)
plt.plot(df_t['Time [s]'],df_t[y_cols])
plt.title('CL y-node='+str(i))
plt.legend(ca.gas.species_names,loc='best')
plt.ylabel(r'CL Gas $\rho_k$ [kg/m$^3$]')
plt.xlabel('Time, t [s]')
plt.tight_layout()
if save:
fig_name = 'CL_gas_densities_v_Time_y'+str(i)+'.png'
save_plot(os.getcwd(),save_folder,fig_name)
fig_num = fig_num +1
if tog == 1:
break
""" CL Pt surface sites """
cl_pt_cols = [col for col in df_t.columns if 'theta_pt_k' in col]
for i in range(cl.d['Ny']):
y_cols = [col for col in cl_pt_cols if 'y'+str(i) in col]
plt.figure(fig_num)
plt.plot(df_t['Time [s]'],df_t[y_cols])
plt.title('CL y-node='+str(i))
plt.legend(ca.pt_s[0].species_names)
plt.ylabel('Surface Coverage [-]')
plt.xlabel('Time, t [s]')
plt.tight_layout()
if save:
fig_name = 'CL_pt_coverages_v_Time_y'+str(i)+'.png'
save_plot(os.getcwd(),save_folder,fig_name)
fig_num = fig_num +1
if tog == 1:
break
""" CL Nafion species' densities """
cl_naf_cols = [col for col in df_t.columns if 'rho_naf_k' in col]
for i in range(cl.d['Ny']):
y_cols = [col for col in cl_naf_cols if 'y'+str(i) in col]
for sp in ca.naf_b[0].species_names:
if sp == 'H(Naf)' and tog == 1:
                pass
else:
sp_cols = [col for col in y_cols if sp in col]
plt.figure(fig_num)
for j in range(cl.d['Nr']):
plt_col = []
plt_col.extend([col for col in sp_cols if 'r'+str(j) in col])
plt.plot(df_t['Time [s]'],df_t[plt_col])
plt.title('CL y-node='+str(i))
plt.legend(['r'+str(n) for n in range(cl.d['Nr'])],loc='best')
plt.ylabel(sp+r' $\rho_k$ [kg/m$^3$]')
plt.xlabel('Time, t [s]')
plt.tight_layout()
if save:
fig_name = 'CL_naf_densities_v_Time_y'+str(i)+'.png'
save_plot(os.getcwd(),save_folder,fig_name)
fig_num = fig_num +1
if tog == 1:
break
return None
def grad_plts(df_y,ca,gdl,cl,tog,save,save_folder):
# Plot solution variable depth gradients to identify trends and limiting
# transport phenomena. Tog controls high and low detail.
fig_num = fig_starter(0)
""" Water volume fraction """
plt.figure(fig_num)
eps_w = df_y['eps_w [-]'].to_numpy()
s_w_gdl = eps_w[:gdl.d['Ny']] / gdl.d['eps_go']
s_w_cl = eps_w[gdl.d['Ny']:] / cl.d['eps_go']
plt.plot(df_y['Depth [um]'],np.hstack([s_w_gdl,s_w_cl]),'-o')
plt.xlabel(r'Cathode Depth [$\mu$m]')
plt.ylabel(r'Water saturation [-]')
plt.tight_layout()
if save:
save_plot(os.getcwd(),save_folder,'Water_vol_frac_v_Depth.png')
fig_num = fig_num +1
""" Gas phase species' densities """
for sp in ca.gas.species_names:
plt.figure(fig_num)
gas_k_col = [col for col in df_y.columns if sp+'(gas)' in col]
plt.plot(df_y['Depth [um]'],df_y[gas_k_col],'-o')
if tog == 2:
plt.xlabel(r'Cathode Depth [$\mu$m]')
plt.ylabel(sp+r'(gas) density [kg/m$^3$]')
plt.tight_layout()
if save:
save_plot(os.getcwd(),save_folder,'Gas_Phase_'+sp+'_Density_v_Depth.png')
fig_num = fig_num +1
if tog == 1:
plt.legend(ca.gas.species_names,loc='best')
plt.xlabel(r'Cathode Depth [$\mu$m]')
plt.ylabel(r'Gas phase $\rho_k$ [kg/m$^3$]')
plt.tight_layout()
if save:
save_plot(os.getcwd(),save_folder,'Gas_Phase_Densities_v_Depth.png')
fig_num = fig_num +1
""" Double layer potential """
plt.figure(fig_num)
x_vals = (df_y['Depth [um]'][gdl.d['Ny']:] - gdl.d['y']*1e6)
y_vals = -1*df_y['phi_dl'][gdl.d['Ny']:]
plt.plot(x_vals,y_vals,'-o')
plt.xlabel(r'Cathode CL Depth [$\mu$m]')
plt.ylabel(r'Electrolyte Potential [V]')
plt.tight_layout()
if save:
save_plot(os.getcwd(),save_folder,'Nafion_Potential_v_CL_Depth.png')
fig_num = fig_num +1
""" Pt surface coverages """
for sp in ca.pt_s[0].species_names:
plt.figure(fig_num)
sp_cols = []
sp_cols.extend([col for col in df_y.columns if sp in col])
x_vals = (df_y['Depth [um]'][gdl.d['Ny']:] - gdl.d['y']*1e6)
y_vals = df_y[sp_cols][gdl.d['Ny']:]
plt.plot(x_vals,y_vals,'-o')
if tog == 2:
            plt.xlabel(r'Cathode CL Depth [$\mu$m]')
plt.ylabel(sp+r' coverage [-]')
plt.tight_layout()
if save:
save_plot(os.getcwd(),save_folder,sp+'_Coverage_v_CL_Depth.png')
fig_num = fig_num +1
if tog == 1:
plt.legend(ca.pt_s[0].species_names,loc='best')
        plt.xlabel(r'Cathode CL Depth [$\mu$m]')
plt.ylabel(r'Surface coverage [-]')
plt.tight_layout()
if save:
save_plot(os.getcwd(),save_folder,'Pt_Coverage_v_CL_Depth.png')
fig_num = fig_num +1
""" Nafion phase species' densities """
for sp in ca.naf_b[0].species_names:
if sp == 'H(Naf)' and tog == 1:
            pass
else:
sp_cols = []
sp_cols.extend([col for col in df_y.columns if sp in col])
plt.figure(fig_num)
for i in range(cl.d['Nr']):
sp_col_r = [col for col in sp_cols if 'r'+str(i) in col]
x_vals = (df_y['Depth [um]'][gdl.d['Ny']:] - gdl.d['y']*1e6)
y_vals = df_y[sp_col_r][gdl.d['Ny']:]
plt.plot(x_vals,y_vals,'-o')
            plt.xlabel(r'Cathode CL Depth [$\mu$m]')
plt.ylabel(sp+r' density [kg/m$^3$]')
plt.legend(['r'+str(i) for i in range(cl.d['Nr'])])
plt.tight_layout()
if save:
fig_name = 'Nafion_Phase_'+sp+'_Density_v_CL_Depth.png'
save_plot(os.getcwd(),save_folder,fig_name)
fig_num = fig_num +1
""" Faradaic current fraction """
if 'i_far_frac [-]' in df_y.columns:
plt.figure(fig_num)
x_vals = (df_y['Depth [um]'][gdl.d['Ny']:] - gdl.d['y']*1e6)
y_vals = df_y['i_far_frac [-]'][gdl.d['Ny']:]
plt.plot(x_vals,y_vals,'-o')
        plt.xlabel(r'Cathode CL Depth [$\mu$m]')
plt.ylabel(r'i$_{Far}$ / i$_{ext}$ [-]')
plt.tight_layout()
if save:
save_plot(os.getcwd(),save_folder,'i_far_frac_v_CL_Depth.png')
fig_num = fig_num +1
return None
def radial_plts(df_r,ca,gdl,cl,tog,save,save_folder):
# Plot solution variable radial gradients to identify trends and limiting
# transport phenomena. Tog controls high and low detail.
fig_num = fig_starter(0)
cnt = 0
for sp in ca.naf_b[0].species_names:
if sp == 'H(Naf)' and tog == 1:
            pass
else:
sp_cols = []
sp_cols.extend([col for col in df_r.columns if sp in col])
for i in range(cl.d['Ny']):
plt.figure(fig_num +cnt)
plt_col_x = [col for col in df_r.columns if 'Radius y'+str(i) in col]
plt_col_y = [col for col in sp_cols if 'y'+str(i) in col]
plt.plot(df_r[plt_col_x],df_r[plt_col_y],'-o')
if tog == 1:
break
if tog == 3:
plt.legend(['y'+str(i)],loc='best')
plt.xlabel(r'Nafion Shell Radius [nm]')
plt.ylabel(sp+r' density [kg/m$^3$]')
plt.tight_layout()
cnt = cnt +1
if tog != 3:
plt.legend(['y'+str(j) for j in range(cl.d['Ny'])],loc='best')
plt.xlabel(r'Nafion Shell Radius [nm]')
plt.ylabel(sp+r' density [kg/m$^3$]')
plt.tight_layout()
if save:
save_plot(os.getcwd(),save_folder,sp+'Density_v_Shell_Radius.png')
fig_num = fig_num +1
return None
def polar_plt(df_p,ca,gdl,cl,polar,data,save,save_folder):
    # Plot the polarization curve; polar == 2 adds a power-density axis. If data is
    # requested, overlay Owejan et al. after checking that w_Pt matches.
fig_num = fig_starter(0)
fig = plt.figure(fig_num)
ax1 = fig.add_axes([0.18, 0.2, 0.7, 0.7])
ax1.plot(df_p['i_ext [A/cm2]'],df_p['Voltage [V]'],label='model')
ax1.set_ylabel(r'Cell Voltage [V]')
ax1.set_xlabel(r'Current Density [A/cm$^2$]')
if all([data == 'air', cl.d['w_Pt'] == 0.2]):
y = np.array([0.95,0.85,0.80,0.77,0.73,0.72,0.70,0.68,0.67,0.65,0.63])
s = np.array([0.1,12,7,7,12,1,8,7,7,9,9]) *1e-3
elif all([data == 'air', cl.d['w_Pt'] == 0.1]):
y = np.array([0.93,0.83,0.79,0.75,0.71,0.69,0.67,0.65,0.64,0.62,0.60])
s = np.array([0.1,9,7,5,7,11,11,7,9,11,11]) *1e-3
elif all([data == 'air', cl.d['w_Pt'] == 0.05]):
y = np.array([0.92,0.81,0.76,0.72,0.67,0.65,0.63,0.60,0.59,0.56,0.54])
s = np.array([0.1,8,6,6,7,7,5,5,6,7,7]) *1e-3
elif all([data == 'air', cl.d['w_Pt'] == 0.025]):
y = np.array([0.91,0.79,0.72,0.68,0.63,0.60,0.57,0.53,0.50,0.46,0.43])
s = np.array([0.1,4,10,14,13,13,19,24,25,23,24]) *1e-3
elif all([data == 'o2', cl.d['w_Pt'] == 0.2]):
y = np.array([0.90,0.89,0.86,0.84,0.81,0.77,0.76,0.75,0.73,0.71,0.70])
s = np.array([5,4,8,5,5,5,5,5,6,9,10]) *1e-3
elif all([data == 'o2', cl.d['w_Pt'] == 0.1]):
y = np.array([0.89,0.86,0.84,0.81,0.78,0.74,0.73,0.71,0.69,0.68,0.67])
s = np.array([5,9,5,5,5,5,5,5,8,9,10]) *1e-3
elif all([data == 'o2', cl.d['w_Pt'] == 0.05]):
y = np.array([0.86,0.83,0.81,0.78,0.75,0.71,0.69,0.67,0.65,0.63,0.62])
s = np.array([8,8,6,6,7,8,8,8,9,8,7]) *1e-3
elif all([data == 'o2', cl.d['w_Pt'] == 0.025]):
y = np.array([0.84,0.81,0.78,0.75,0.72,0.67,0.65,0.64,0.61,0.59,0.57])
s = np.array([6,5,5,5,8,12,13,14,16,18,20]) *1e-3
else:
x = y = s = None
if data == 'air':
x = np.array([0.0,0.05,0.20,0.40,0.80,1.0,1.2,1.5,1.65,1.85,2.0])
elif data == 'o2':
x = np.array([0.03,0.05,0.10,0.2,0.4,0.8,1.0,1.2,1.5,1.75,2.0])
if data:
ax1.errorbar(x,y,yerr=s,fmt='.',color='C0',capsize=3,label='Owejan')
ax1.set_ylim([0.35, 1.0])
ax1.set_xlim([0, 2.1])
ax1.legend(loc='lower center')
if polar == 2:
ax2 = ax1.twinx()
ax2.plot(df_p['i_ext [A/cm2]'],df_p['Power [W/cm2]'],'--',color='C0')
ax2.set_ylabel(r'Power Density [W/cm$^2$]')
if save:
save_plot(os.getcwd(),save_folder,'Polarization_Curve.png')
fig_num = fig_num +1
return None
def over_plt(df_p,ca,gdl,cl,save,save_folder):
# Plot the overpotential curve.
fig_num = fig_starter(0)
plt.figure(fig_num)
plt.plot(df_p['i_ext [A/cm2]'], df_p['Eta [V]'])
plt.ylabel(r'Overpotential [V]')
plt.xlabel(r'Current Density [A/cm$^2$]')
plt.tight_layout()
if save:
save_plot(os.getcwd(),save_folder,'Overpotential_Curve.png')
fig_num = fig_num +1
return None
def verification(df_i,ca,gdl,cl,gdl_cl,i_find):
i_ind = np.argmin(abs(df_i[0] - i_find))
sv = df_i.loc[i_ind][1:].to_numpy()
cl.update(ca,sv)
gdl.update(ca,sv)
i_4F = df_i[0][i_ind]*100**2 / (4*ct.faraday)
i_Last_gdl = int(gdl.d['Len'] / gdl.d['Ny'] *(gdl.d['Ny'] -1))
rho_gdl_k = sv[ca.ptr['rho_gdl_k'] +i_Last_gdl]
TDY1 = gdl.d['T'], sum(rho_gdl_k), rho_gdl_k
i_First_cl = 0
rho_cl_k = sv[ca.ptr['rho_gas_k'] +i_First_cl]
TDY2 = cl.d['T'], sum(rho_cl_k), rho_cl_k
TDY_vec = [TDY1, TDY2]
O2_BC_flux = fickian_adf(TDY_vec, ca, gdl_cl, gdl.d, cl.d, None) \
/ ca.gas.molecular_weights
print('\ni_ext:', np.round(df_i[0][i_ind],3))
print('O2_i_4F(x10^5):', np.round(i_4F*1e5,3))
print('O2_BC_flux(x10^5):', np.round(O2_BC_flux[0]*1e5,3))
print('ratio:', np.round(i_4F / O2_BC_flux[0],3),'\n')
return None
|
import os
# inputfile = "c:\ip.txt"
# inputfile='c:\column.bmp'
# outputfile = 'c:\ip_result.txt'
# outputfile2 = 'c:\ip_result2.txt'
from transfor import *
from ReadFile import *
from time import sleep  # used by the wait loop in translatefile()
gFilePath = ''
def getReadStart(filename):
return 0
def getFileIndex(filename):
return 1
def convert2filename(ipos, iFileIndex, data):
if (len(data) == 0):
return ''
strPos = Int2Str36(ipos, 2)
strIndex = Int2Str36(iFileIndex)
strData = Bin2Hex(data)
strData = strData.zfill(24)
    result = strPos + strIndex + '-' + strData[0:4] + '-' + strData[4:8] + '-' + strData[8:12] + '-' + strData[12:24]
return result
lstFiles = ()
iFileIndex = 0
def getoldfile():
    global iFileIndex
    if iFileIndex < len(lstFiles):
        result = lstFiles[iFileIndex]
        iFileIndex = iFileIndex + 1
        return result
    return ''
def convert2newfile(oldfile, newname):
(path, name) = os.path.split(oldfile)
(pr, las) = os.path.splitext(name)
newfilename = path + '\\' + newname + las
os.rename(oldfile, newfilename)
def translatefile(filename):
inputfile = filename
iReadStart = getReadStart(filename)
iFileIndex = getFileIndex(filename)
readObj = ReadFile(inputfile)
iFileSize = readObj.filesize()
(iPos, data) = readObj.readvalue(iReadStart, 12)
while(len(data) > 0):
oldFile = getoldfile()
if iReadStart == 0:
newName = convert2filename(iFileSize, 0, filename)
else:
newName = convert2filename(iPos, iFileIndex, data)
while (len(oldFile) == 0):
            sleep(1)  # wait for the next file name to become available
oldFile = getoldfile()
convert2newfile(oldFile, newName)
def translatefile2(filename):
inputfile=filename
outputfile=filename+".result"
infile = open(inputfile, 'rb')
outfile = open(outputfile, 'w')
#outfile2 = open(outputfile2, 'w')
fcount = os.path.getsize(inputfile)
contype = 2
while fcount > 0:
if fcount<1024:
data=infile.read(fcount)
fcount -= fcount
else:
data=infile.read(1024)
fcount-=1024
#outfile.write(data)
        for cval in data:
            # iterating over bytes yields ints in Python 3, so ord() is not needed
            if contype == 1:
                strval = bin(cval)[2:].zfill(8)
                strval2 = chr(int(strval, 2))
            else:
                strval = hex(cval)[2:].zfill(2)
                strval2 = chr(int(strval, 16))
            outfile.write(strval)
            #outfile2.write(strval2)
infile.close()
outfile.close()
#outfile2.close()
def travsedir(path):
    import os
    files = os.listdir(path)
    gFilePath = path
    for strfile in files:
        print('%s' % strfile)
        if os.path.isfile(path + strfile):
            # (root, extension) = os.path.splitext(strfile)
            translatefile(path + strfile)
def writefile(path):
    import os
    files = os.listdir(path)
    for strfile in files:
        print('%s' % strfile)
        if os.path.isfile(path + strfile):
            translatefile(path + strfile)
import sys
if __name__ == '__main__':
#if (len(sys.argv)) != 2:
# exit(1)
f_input = 'c:\\tran\\'
#sys.argv[1]
print ('%s' % f_input)
lstFiles = os.listdir('c:\\itestftptrans')
travsedir(f_input)
|
import math
from app.automata_learning.black_box.pac_learning.normal_teacher.timeInterval import Guard, simple_guards
from app.automata_learning.black_box.pac_learning.normal_teacher.timedWord import TimedWord, ResetTimedWord
class OTA(object):
def __init__(self, actions, states, trans, init_state, accept_states, sink_state):
self.actions = actions
self.states = states
self.trans = trans
self.init_state = init_state
self.accept_states = accept_states
self.sink_state = sink_state
def show_discreteOTA(self):
print("Actions: " + str(self.actions))
print("States: " + str(self.states))
print("InitState: {}".format(self.init_state))
print("AcceptStates: {}".format(self.accept_states))
print("SinkState: {}".format(self.sink_state))
print("Transitions: ")
for t in self.trans:
print(' ' + str(t.tran_id), 'S_' + str(t.source), str(t.action), str(t.time_point), str(t.reset), 'S_' + str(t.target), end="\n")
def show_OTA(self):
print("Actions: " + str(self.actions))
print("States: " + str(self.states))
print("InitState: {}".format(self.init_state))
print("AcceptStates: {}".format(self.accept_states))
print("SinkState: {}".format(self.sink_state))
print("Transitions: ")
for t in self.trans:
print(" " + str(t.tran_id), 'S_' + str(t.source), str(t.action), t.show_guards(), str(t.reset), 'S_' + str(t.target), end="\n")
def __lt__(self, other):
return len(self.states) < len(other.states)
# Perform tests(DTWs) on the hypothesis(smart teacher), return value and DRTWs(full)
def test_DTWs(self, DTWs):
DRTWs = []
now_time = 0
cur_state = self.init_state
for dtw in DTWs:
if cur_state == self.sink_state:
DRTWs.append(ResetTimedWord(dtw.action, dtw.time, True))
else:
time = dtw.time + now_time
new_LTW = TimedWord(dtw.action, time)
for tran in self.trans:
if tran.source == cur_state and tran.is_passing_tran(new_LTW):
cur_state = tran.target
if tran.reset:
now_time = 0
reset = True
else:
now_time = time
reset = False
DRTWs.append(ResetTimedWord(dtw.action, dtw.time, reset))
break
if cur_state in self.accept_states:
value = 1
elif cur_state == self.sink_state:
value = -1
else:
value = 0
return DRTWs, value
# Perform tests(DTWs) on the hypothesis(normal teacher), return value
def test_DTWs_normal(self, DTWs):
now_time = 0
cur_state = self.init_state
for dtw in DTWs:
time = dtw.time + now_time
new_LTW = TimedWord(dtw.action, time)
for tran in self.trans:
if tran.source == cur_state and tran.is_passing_tran(new_LTW):
cur_state = tran.target
if tran.reset:
now_time = 0
else:
now_time = time
if cur_state == self.sink_state:
return -1
break
if cur_state in self.accept_states:
value = 1
else:
value = 0
return value
# build simple hypothesis - merge guards
def build_simple_hypothesis(self):
actions = self.actions
states = self.states
init_state = self.init_state
accept_states = self.accept_states
sink_state = self.sink_state
trans = []
tran_num = 0
for s in self.states:
for t in self.states:
for action in actions:
for reset in [True, False]:
temp = []
for tran in self.trans:
if tran.source == s and tran.action == action and tran.target == t and tran.reset == reset:
temp.append(tran)
if temp:
guards = []
for i in temp:
guards += i.guards
guards = simple_guards(guards)
trans.append(OTATran(tran_num, s, action, guards, reset, t))
tran_num += 1
return OTA(actions, states, trans, init_state, accept_states, sink_state)
# Get the max time value constant appearing in OTA.
def max_time_value(self):
max_time_value = 0
for tran in self.trans:
for c in tran.guards:
if c.max_value == '+':
temp_max_value = float(c.min_value) + 1
else:
temp_max_value = float(c.max_value)
if max_time_value < temp_max_value:
max_time_value = temp_max_value
return max_time_value
class DiscreteOTATran(object):
def __init__(self, tran_id, source, action, time_point, reset, target):
self.tran_id = tran_id
self.source = source
self.action = action
self.time_point = time_point
self.reset = reset
self.target = target
class OTATran(object):
def __init__(self, tran_id, source, action, guards, reset, target):
self.tran_id = tran_id
self.source = source
self.action = action
self.guards = guards
self.reset = reset
self.target = target
def is_passing_tran(self, ltw):
if ltw.action == self.action:
for guard in self.guards:
if guard.is_in_interval(ltw.time):
return True
else:
return False
return False
def show_guards(self):
temp = self.guards[0].show()
for i in range(1, len(self.guards)):
temp = temp + 'U' + self.guards[i].show()
return temp
def struct_discreteOTA(table, actions):
states = []
trans = []
init_state = ''
accept_states = []
sink_state = ''
# deal with states
values_name_dict = {}
for s, i in zip(table.S, range(0, len(table.S))):
state_name = i
values_name_dict[str(s.values) + str(s.suffixes_resets)] = state_name
states.append(state_name)
if not s.LRTWs:
init_state = state_name
if s.values[0] == 1:
accept_states.append(state_name)
if s.values[0] == -1:
sink_state = state_name
# deal with trans
trans_num = 0
table_elements = [s for s in table.S] + [r for r in table.R]
for r in table_elements:
source = None
target = None
if not r.LRTWs:
continue
timedWords = [lrtw for lrtw in r.LRTWs]
w = timedWords[:-1]
a = timedWords[len(timedWords) - 1]
for element in table_elements:
if is_equal(w, element.LRTWs):
source = values_name_dict[str(element.values) + str(element.suffixes_resets)]
if is_equal(timedWords, element.LRTWs):
target = values_name_dict[str(element.values) + str(element.suffixes_resets)]
        # determine the transition's action
action = a.action
time_point = a.time
reset = a.reset
        # check whether a new transition needs to be added
need_new_tran_flag = True
for tran in trans:
if source == tran.source and action == tran.action and target == tran.target:
if time_point == tran.time_point:
need_new_tran_flag = False
if reset != tran.reset:
return None
break
if need_new_tran_flag:
temp_tran = DiscreteOTATran(trans_num, source, action, time_point, reset, target)
trans.append(temp_tran)
trans_num = trans_num + 1
return OTA(actions, states, trans, init_state, accept_states, sink_state)
def struct_hypothesisOTA(discreteOTA):
trans = []
for s in discreteOTA.states:
s_dict = {}
for key in discreteOTA.actions:
s_dict[key] = [0]
for tran in discreteOTA.trans:
if tran.source == s:
for action in discreteOTA.actions:
if tran.action == action:
temp_list = s_dict[action]
if tran.time_point not in temp_list:
temp_list.append(tran.time_point)
s_dict[action] = temp_list
for value in s_dict.values():
value.sort()
for tran in discreteOTA.trans:
if tran.source == s:
time_points = s_dict[tran.action]
guards = []
tw = tran.time_point
index = time_points.index(tw)
if index + 1 < len(time_points):
if is_int(tw) and is_int(time_points[index + 1]):
tempGuard = Guard("[" + str(tw) + "," + str(time_points[index + 1]) + ")")
elif is_int(tw) and not is_int(time_points[index + 1]):
tempGuard = Guard("[" + str(tw) + "," + str(math.modf(time_points[index + 1])[1]) + "]")
elif not is_int(tw) and is_int(time_points[index + 1]):
tempGuard = Guard("(" + str(math.modf(tw)[1]) + "," + str(time_points[index + 1]) + ")")
else:
tempGuard = Guard("(" + str(math.modf(tw)[1]) + "," + str(math.modf(time_points[index + 1])[1]) + "]")
guards.append(tempGuard)
else:
if is_int(tw):
tempGuard = Guard("[" + str(tw) + ",+)")
else:
tempGuard = Guard("(" + str(math.modf(tw)[1]) + ",+)")
guards.append(tempGuard)
for guard in guards:
trans.append(OTATran(tran.tran_id, tran.source, tran.action, [guard], tran.reset, tran.target))
return OTA(discreteOTA.actions, discreteOTA.states, trans, discreteOTA.init_state, discreteOTA.accept_states, discreteOTA.sink_state)
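# Illustrative sketch (not part of the learner): how a sorted list of discrete time
# points for one (source state, action) pair is turned into guard interval strings by
# the rules in struct_hypothesisOTA above. Only the string construction is mirrored
# here; the Guard class itself and the math import are assumed to come from elsewhere
# in this module (struct_hypothesisOTA already relies on both).
def _sketch_guard_strings(time_points):
    frac = lambda x: math.modf(x)[0]
    whole = lambda x: math.modf(x)[1]
    out = []
    for idx, tw in enumerate(time_points):
        if idx + 1 < len(time_points):
            nxt = time_points[idx + 1]
            if frac(tw) == 0 and frac(nxt) == 0:
                out.append("[" + str(tw) + "," + str(nxt) + ")")
            elif frac(tw) == 0:
                out.append("[" + str(tw) + "," + str(whole(nxt)) + "]")
            elif frac(nxt) == 0:
                out.append("(" + str(whole(tw)) + "," + str(nxt) + ")")
            else:
                out.append("(" + str(whole(tw)) + "," + str(whole(nxt)) + "]")
        elif frac(tw) == 0:
            out.append("[" + str(tw) + ",+)")
        else:
            out.append("(" + str(whole(tw)) + ",+)")
    return out
# Example: _sketch_guard_strings([0, 1.5, 3]) -> ['[0,1.0]', '(1.0,3)', '[3,+)']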
# remove the sink state and its transitions
def remove_sink_state(hypothesis):
if hypothesis.sink_state == '':
return hypothesis
actions = hypothesis.actions
states = hypothesis.states
init_state = hypothesis.init_state
accept_states = hypothesis.accept_states
states.remove(hypothesis.sink_state)
trans = []
for tran in hypothesis.trans:
if tran.source != hypothesis.sink_state and tran.target != hypothesis.sink_state:
trans.append(tran)
return OTA(actions, states, trans, init_state, accept_states, hypothesis.sink_state)
# --------------------------------- auxiliary function ---------------------------------
# Determine whether two LRTWs are the same
def is_equal(LRTWs1, LRTWs2):
if len(LRTWs1) != len(LRTWs2):
return False
else:
flag = True
for i in range(len(LRTWs1)):
if LRTWs1[i] != LRTWs2[i]:
flag = False
break
if flag:
return True
else:
return False
# is num an integer-valued number?
def is_int(num):
x, y = math.modf(num)
if x == 0:
return True
else:
return False
|
import logging
import multiprocessing
from queue import Full, Empty
from time import time
from unittest import TestCase
import ctypes
from faster_fifo import Queue
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
fmt = logging.Formatter('[%(asctime)s][%(process)05d] %(message)s')
ch.setFormatter(fmt)
log = logging.getLogger('rl')
log.setLevel(logging.DEBUG)
log.handlers = [] # No duplicated handlers
log.propagate = False # workaround for duplicated logs in ipython
log.addHandler(ch)
MSG_SIZE = 5
def make_msg(msg_idx):
return (msg_idx,) * MSG_SIZE
def produce_msgs(q, p_idx, num_messages):
i = 0
while i < num_messages:
try:
q.put(make_msg(i), timeout=0.01)
if i % 50000 == 0:
log.info('Produce: %d %d', i, p_idx)
i += 1
except Full:
pass
except Exception as exc:
log.exception(exc)
def consume_msgs(q, p_idx, all_msgs_sent, consume_many=1):
num_received = 0
while True:
try:
if consume_many == 1:
msg = q.get(timeout=0.01)
msgs = [msg]
else:
msgs = q.get_many(timeout=0.01, max_messages_to_get=consume_many)
for msg in msgs:
if msg[0] % 50000 == 0:
log.info('Consume: %r %d num_msgs: %d total received: %d', msg, p_idx, len(msgs), num_received)
num_received += 1
except Empty:
if all_msgs_sent.value:
break
except Exception as exc:
log.exception(exc)
def run_test(queue_cls, num_producers, num_consumers, msgs_per_prod, consume_many):
start_time = time()
q = queue_cls(100000)
producers = []
consumers = []
all_msgs_sent = multiprocessing.RawValue(ctypes.c_bool, False)
for j in range(num_producers):
p = multiprocessing.Process(target=produce_msgs, args=(q, j, msgs_per_prod))
producers.append(p)
for j in range(num_consumers):
p = multiprocessing.Process(target=consume_msgs, args=(q, j, all_msgs_sent, consume_many))
consumers.append(p)
for p in producers:
p.start()
for c in consumers:
c.start()
for p in producers:
p.join()
all_msgs_sent.value = True
for c in consumers:
c.join()
q.close()
log.info('Exiting queue type %s', queue_cls.__module__ + '.' + queue_cls.__name__)
end_time = time()
time_taken = end_time - start_time
log.info('Time taken by queue type %s is %.5f', queue_cls.__module__ + '.' + queue_cls.__name__, time_taken)
return time_taken
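# A minimal standalone invocation sketch (assuming faster_fifo is installed): run a
# single producer/consumer pair through run_test() outside the unittest harness.
if __name__ == '__main__':
    run_test(Queue, num_producers=1, num_consumers=1, msgs_per_prod=10000, consume_many=1)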
class ComparisonTestCase(TestCase):
def comparison(self, n_prod, n_con, n_msgs):
n_msgs += 1 # +1 here to make sure the last log line will be printed
time_ff = run_test(Queue, num_producers=n_prod, num_consumers=n_con, msgs_per_prod=n_msgs, consume_many=1)
time_ff_many = run_test(Queue, num_producers=n_prod, num_consumers=n_con, msgs_per_prod=n_msgs,
consume_many=100)
time_mp = run_test(multiprocessing.Queue, num_producers=n_prod, num_consumers=n_con, msgs_per_prod=n_msgs,
consume_many=1)
self.assertLess(time_ff, time_mp)
self.assertLess(time_ff_many, time_mp)
return time_ff, time_ff_many, time_mp
def test_all_configurations(self):
configurations = (
(1, 1, 200000),
(1, 10, 200000),
(10, 1, 100000),
(3, 20, 100000),
(20, 3, 50000),
(20, 20, 50000),
)
results = []
for c in configurations:
results.append(self.comparison(*c))
log.info('\nResults:\n')
for c, r in zip(configurations, results):
log.info('Configuration %r, timing [ff: %.2fs, ff_many: %.2fs, mp.queue: %.2fs]', c, *r)
# i9-7900X (10-core CPU)
# [2020-05-16 03:24:26,548][30412] Configuration (1, 1, 200000), timing [ff: 0.92s, ff_many: 0.93s, mp.queue: 2.83s]
# [2020-05-16 03:24:26,548][30412] Configuration (1, 10, 200000), timing [ff: 1.43s, ff_many: 1.40s, mp.queue: 7.60s]
# [2020-05-16 03:24:26,548][30412] Configuration (10, 1, 100000), timing [ff: 4.95s, ff_many: 1.40s, mp.queue: 12.24s]
# [2020-05-16 03:24:26,548][30412] Configuration (3, 20, 100000), timing [ff: 2.29s, ff_many: 2.25s, mp.queue: 13.25s]
# [2020-05-16 03:24:26,548][30412] Configuration (20, 3, 50000), timing [ff: 3.19s, ff_many: 1.12s, mp.queue: 29.07s]
# [2020-05-16 03:24:26,548][30412] Configuration (20, 20, 50000), timing [ff: 1.65s, ff_many: 4.14s, mp.queue: 46.71s]
# i5-4200U (dual-core CPU)
# [2020-05-22 18:03:55,061][09146] Configuration (1, 1, 200000), timing [ff: 2.09s, ff_many: 2.20s, mp.queue: 7.86s]
# [2020-05-22 18:03:55,061][09146] Configuration (1, 10, 200000), timing [ff: 4.01s, ff_many: 3.88s, mp.queue: 11.68s]
# [2020-05-22 18:03:55,061][09146] Configuration (10, 1, 100000), timing [ff: 16.68s, ff_many: 5.98s, mp.queue: 44.48s]
# [2020-05-22 18:03:55,061][09146] Configuration (3, 20, 100000), timing [ff: 7.83s, ff_many: 7.49s, mp.queue: 22.59s]
# [2020-05-22 18:03:55,061][09146] Configuration (20, 3, 50000), timing [ff: 22.30s, ff_many: 6.35s, mp.queue: 66.30s]
# [2020-05-22 18:03:55,061][09146] Configuration (20, 20, 50000), timing [ff: 14.39s, ff_many: 15.78s, mp.queue: 78.75s]
|
# -*- compile-command: "../../tests/test_refine.py" -*-
# Copyright (c) 2007-2009 PediaPress GmbH
# See README.rst for additional licensing information.
from mwlib.utoken import show, token as T
from mwlib.refine import util
class parse_table_cells(object):
def __init__(self, tokens, xopts):
self.tokens = tokens
self.run()
def is_table_cell_start(self, token):
return token.type==T.t_column or (token.type==T.t_html_tag and token.rawtagname in ("td", "th"))
def is_table_cell_end(self, token):
return token.type==T.t_html_tag_end and token.rawtagname in ("td", "th")
def find_modifier(self, cell):
children = cell.children
if not children:
return
for i,x in enumerate(children):
t = children[i]
if t.type==T.t_2box_open:
break
if t.type==T.t_special and t.text=="|":
mod = T.join_as_text(children[:i])
cell.vlist = util.parseParams(mod)
del children[:i+1]
return
def replace_tablecaption(self, children):
i = 0
while i<len(children):
if children[i].type == T.t_tablecaption:
children[i].type = T.t_special
children[i].text = u"|"
children.insert(i+1, T(type=T.t_text, text="+"))
i+=1
def run(self):
tokens = self.tokens
i = 0
start = None
self.is_header = False
def makecell(skip_end=0):
st = tokens[start].text.strip()
if st=="|":
self.is_header = False
elif st=="!":
self.is_header = True
is_header = self.is_header
if tokens[start].rawtagname=="th":
is_header = True
elif tokens[start].rawtagname=="td":
is_header = False
if is_header:
tagname = "th"
else:
tagname = "td"
search_modifier = tokens[start].text.strip() in ("|", "!", "||", "!!")
sub = tokens[start+1:i-skip_end]
self.replace_tablecaption(sub)
tokens[start:i] = [T(type=T.t_complex_table_cell, tagname=tagname,
start=tokens[start].start, children=sub,
vlist=tokens[start].vlist, is_header=is_header)]
if search_modifier:
self.find_modifier(tokens[start])
while i < len(tokens):
if self.is_table_cell_start(tokens[i]):
if start is not None:
makecell()
start += 1
i = start+1
else:
start = i
i+=1
elif self.is_table_cell_end(tokens[i]):
if start is not None:
i+=1
makecell(skip_end=1)
i = start+1
start = None
else:
i+= 1
else:
i += 1
if start is not None:
makecell()
class parse_table_rows(object):
def __init__(self, tokens, xopts):
self.tokens = tokens
self.xopts = xopts
self.run()
def is_table_row_start(self, token):
return token.type==T.t_row or (token.type==T.t_html_tag and token.rawtagname=='tr')
def is_table_row_end(self, token):
return token.type==T.t_html_tag_end and token.rawtagname=='tr'
def find_modifier(self, row):
children = row.children
for i,x in enumerate(children):
if x.type in (T.t_newline, T.t_break):
mod = T.join_as_text(children[:i])
#print "MODIFIER:", repr(mod)
row.vlist = util.parseParams(mod)
del children[:i]
return
def is_table_cell_start(self, token):
return token.type==T.t_column or (token.type==T.t_html_tag and token.rawtagname in ("td", "th"))
def run(self):
tokens = self.tokens
i = 0
start = None
remove_start = 1
rowbegintoken = None
def should_find_modifier():
if rowbegintoken is None:
return False
if rowbegintoken.rawtagname:
return False
return True
def args():
if rowbegintoken is None:
return {}
return dict(vlist=rowbegintoken.vlist)
while i < len(tokens):
if start is None and self.is_table_cell_start(tokens[i]):
rowbegintoken = None
start = i
remove_start = 0
i+=1
elif self.is_table_row_start(tokens[i]):
if start is not None:
children = tokens[start+remove_start:i]
tokens[start:i] = [T(type=T.t_complex_table_row, tagname="tr", start=tokens[start].start, children=children, **args())]
if should_find_modifier():
self.find_modifier(tokens[start])
parse_table_cells(children, self.xopts)
start += 1 # we didn't remove the start symbol above
rowbegintoken= tokens[start]
remove_start = 1
i = start+1
else:
rowbegintoken = tokens[i]
remove_start = 1
start = i
i+=1
elif self.is_table_row_end(tokens[i]):
if start is not None:
sub = tokens[start+remove_start:i]
tokens[start:i+1] = [T(type=T.t_complex_table_row, tagname="tr", start=tokens[start].start, children=sub, **args())]
if should_find_modifier():
self.find_modifier(tokens[start])
parse_table_cells(sub, self.xopts)
i = start+1
start = None
rowbegintoken = None
else:
i+= 1
else:
i += 1
if start is not None:
sub = tokens[start+remove_start:]
tokens[start:] = [T(type=T.t_complex_table_row, tagname="tr", start=tokens[start].start, children=sub, **args())]
if should_find_modifier():
self.find_modifier(tokens[start])
parse_table_cells(sub, self.xopts)
class parse_tables(object):
def __init__(self, tokens, xopts):
self.xopts = xopts
self.tokens = tokens
self.run()
def is_table_start(self, token):
return token.type==T.t_begintable or (token.type==T.t_html_tag and token.rawtagname=="table")
def is_table_end(self, token):
return token.type==T.t_endtable or (token.type==T.t_html_tag_end and token.rawtagname=="table")
def handle_rows(self, sublist):
parse_table_rows(sublist, self.xopts)
def find_modifier(self, table):
children = table.children
def compute_mod():
mod = T.join_as_text(children[:i])
#print "MODIFIER:", repr(mod)
table.vlist = util.parseParams(mod)
del children[:i]
i = 0
for i,x in enumerate(children):
if x.type in (T.t_newline, T.t_break):
break
compute_mod()
def find_caption(self, table):
children = table.children
start = None
i = 0
while i < len(children):
t = children[i]
if t.type==T.t_tablecaption:
start = i
i += 1
break
if t.text is None or t.text.strip():
return
i+=1
modifier = None
while i<len(children):
t = children[i]
if t.tagname not in ("ref",) and (t.text is None or t.text.startswith("\n")):
if modifier:
mod = T.join_as_text(children[start:modifier])
vlist = util.parseParams(mod)
sub = children[modifier+1:i]
else:
sub = children[start+1:i]
vlist = {}
caption = T(type=T.t_complex_caption, children=sub, vlist=vlist)
children[start:i] = [caption]
return
elif t.text=="|" and modifier is None:
modifier = i
elif t.type == T.t_2box_open and modifier is None:
modifier = 0
i += 1
def run(self):
tokens = self.tokens
i = 0
stack = []
def maketable():
start = stack.pop()
starttoken = tokens[start]
sub = tokens[start+1:i]
from mwlib.refine import core
tp = core.tagparser()
tp.add("caption", 5)
tp(sub, self.xopts)
tokens[start:i+1] = [T(type=T.t_complex_table,
tagname="table", start=tokens[start].start, children=sub,
vlist=starttoken.vlist, blocknode=True)]
if starttoken.text.strip() == "{|":
self.find_modifier(tokens[start])
self.handle_rows(sub)
self.find_caption(tokens[start])
return start
while i < len(tokens):
if self.is_table_start(tokens[i]):
stack.append(i)
i+=1
elif self.is_table_end(tokens[i]):
if stack:
i = maketable()+1
else:
i += 1
else:
i += 1
while stack:
maketable()
class fix_tables(object):
def __init__(self, tokens, xopts):
self.xopts = xopts
self.tokens = tokens
self.run()
def run(self):
tokens = self.tokens
for x in tokens:
if x.type != T.t_complex_table:
continue
rows = [c for c in x.children if c.type in (T.t_complex_table_row, T.t_complex_caption)]
if not rows:
x.type = T.t_complex_node
x.tagname = None
def extract_garbage(tokens, is_allowed, is_whitespace=None):
if is_whitespace is None:
is_whitespace = lambda t: t.type in (T.t_newline, T.t_break)
res = []
i = 0
start = None
while i<len(tokens):
if is_whitespace(tokens[i]):
if start is None:
start = i
i+=1
elif is_allowed(tokens[i]):
start = None
i+=1
else:
if start is None:
start = i
i+=1
# find end of garbage
while i<len(tokens):
if is_allowed(tokens[i]):
break
i+= 1
garbage = tokens[start:i]
del tokens[start:i]
i = start
res.append(T(type=T.t_complex_node, children=garbage))
return res
class remove_table_garbage(object):
need_walker = False
def __init__(self, tokens, xopts):
from mwlib.refine import core
walker = core.get_token_walker()
for t in walker(tokens):
self.tokens = t
self.run()
def run(self):
tokens = self.tokens
tableidx = 0
while tableidx<len(tokens):
if tokens[tableidx].type==T.t_complex_table:
# garbage = extract_garbage(tokens[tableidx].children,
# is_allowed=lambda t: t.type in (T.t_complex_table_row, T.t_complex_caption))
tmp = []
for c in tokens[tableidx].children:
if c.type==T.t_complex_table_row:
rowgarbage = extract_garbage(c.children,
is_allowed=lambda t: t.type in (T.t_complex_table_cell, ))
tmp.extend(rowgarbage)
tokens[tableidx+1:tableidx+1] = tmp
tableidx+=1
|
from model_garden.models import Dataset
from tests import BaseTestCase
class TestDataset(BaseTestCase):
def test_str(self):
dataset = self.test_factory.create_dataset()
self.assertEqual(str(dataset),
f"Dataset(path='{dataset.path}', bucket='{dataset.bucket.name}', "
f"dataset_format='{dataset.dataset_format}')")
def test_create(self):
self.test_factory.create_dataset()
self.assertEqual(Dataset.objects.count(), 1)
|
'''
Created by auto_sdk on 2015.09.22
'''
from top.api.base import RestApi
class ItemImgUploadRequest(RestApi):
def __init__(self, domain='gw.api.taobao.com', port=80):
RestApi.__init__(self, domain, port)
self.id = None
self.image = None
self.is_major = None
self.num_iid = None
self.position = None
def getapiname(self):
return 'taobao.item.img.upload'
def getMultipartParas(self):
return ['image']
|
from django.apps import AppConfig
class WithdrawConfig(AppConfig):
name = 'withdraw'
|
from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db.models import Avg
# signals for automating token generation
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from django.conf import settings
class Movie(models.Model):
title = models.CharField(max_length=300, null=False, unique=True)
story = models.TextField()
genre = models.CharField(max_length=10)
released_year = models.CharField(max_length=4)
created_by = models.ForeignKey(User, on_delete=models.CASCADE)
average_rating = models.FloatField(default=0.0)
def __str__(self):
return str(self.title)
class Rating(models.Model):
given_by = models.ForeignKey(User, on_delete=models.CASCADE)
for_movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
rating = models.IntegerField(default=1, validators=[MinValueValidator(1), MaxValueValidator(10)])
def __str__(self):
return str(self.given_by.username) + " rated " + str(self.for_movie) + " - " + str(self.rating) + " stars"
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def createAuthToken(sender, instance, created, **kwargs):
if created:
Token.objects.create(user=instance)
def cal_average():
for movie in Movie.objects.all():
all_ratings = Rating.objects.filter(for_movie=movie)
if all_ratings:
avg = all_ratings.aggregate(Avg('rating'))['rating__avg']
movie.average_rating = avg
movie.save()
else:
movie.average_rating = 0.0
movie.save()
@receiver(post_save, sender=Rating)
def calculateAverageRating(sender, instance, **kwargs):
cal_average()
@receiver(post_delete, sender=Rating)
def calculateAverageRatingAfterDeletion(sender, instance, **kwargs):
cal_average()
|
# Generated by Django 3.2.5 on 2022-03-03 11:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("posthog", "0215_add_tags_back"),
]
operations = [
migrations.AddField(
model_name="insight", name="derived_name", field=models.CharField(blank=True, max_length=400, null=True),
),
]
|
#
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019, 2020, 2021, 2022
# Vladimír Vondruš <mosra@centrum.cz>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import re
from docutils import nodes, utils
from docutils.parsers import rst
from docutils.parsers.rst.roles import set_classes
# to avoid dependencies, link_regexp and parse_link() are shared by m.abbr,
# m.gh, m.gl, m.link and m.vk
link_regexp = re.compile(r'(?P<title>.*) <(?P<link>.+)>')
def parse_link(text):
link = utils.unescape(text)
m = link_regexp.match(link)
if m: return m.group('title', 'link')
return None, link
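# Hedged usage notes for parse_link() (illustrative values only):
#   parse_link('Title <https://example.com>')  ->  ('Title', 'https://example.com')
#   parse_link('https://example.com')          ->  (None, 'https://example.com')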
def abbr(name, rawtext, text, lineno, inliner, options={}, content=[]):
abbr, title = parse_link(text)
set_classes(options)
if not abbr:
return [nodes.abbreviation(title, title, **options)], []
return [nodes.abbreviation(abbr, abbr, title=title, **options)], []
def register_mcss(**kwargs):
rst.roles.register_local_role('abbr', abbr)
register = register_mcss # for Pelican
|
from django.apps import AppConfig
class RandevuConfig(AppConfig):
name = 'randevu'
|
import torch
from torch.nn import functional as F
class DropoutMC:
def __init__(self, num_samples=16, p=0.2):
self.num_samples = num_samples
self.p = p
def create_samples(self, module, activation, *args, **kwargs):
res = []
for i in range(self.num_samples):
new_args = [F.dropout(arg, p=self.p, training=True, inplace=False) for arg in args]
new_kwargs = {key: F.dropout(value, p=self.p, training=True, inplace=False) for key, value in kwargs.items()}
logits = module(*new_args, **new_kwargs)
if activation is not None:
logits = activation(logits)
res.append(logits)
return torch.stack(res, dim=0)
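# A minimal usage sketch (illustrative module and sizes only): draw Monte-Carlo dropout
# samples from a plain linear layer and average the resulting probabilities.
if __name__ == '__main__':
    torch.manual_seed(0)
    layer = torch.nn.Linear(8, 3)                     # stand-in for any module taking tensors
    x = torch.randn(4, 8)                             # a dummy batch
    mc = DropoutMC(num_samples=16, p=0.2)
    samples = mc.create_samples(layer, lambda t: torch.softmax(t, dim=-1), x)
    print(samples.shape)                              # -> torch.Size([16, 4, 3])
    print(samples.mean(dim=0).shape)                  # predictive mean over MC samples -> (4, 3)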
|
from pythonforandroid.recipe import CythonRecipe
class NeoscryptRecipe(CythonRecipe):
url = 'git+https://github.com/sparkspay/python-neoscrypt.git'
version = 'master'
name = 'neoscrypt'
depends = [('python3crystax')]
def get_recipe_env(self, arch):
'''
        Cython has a problem with -lpython3.6m; hack around it by adding an -L path.
        Roughly adapted from a similar dirty fix in numpy (fix-numpy branch).
'''
env = super(NeoscryptRecipe, self).get_recipe_env(arch)
        #: Hack: add the CrystaX Python library path as an -L linker flag
if 'python3crystax' not in self.ctx.recipe_build_order:
return env
api_ver = self.ctx.android_api
flags = " -L{ctx.ndk_dir}/sources/python/3.6/libs/{arch}/" \
.format(ctx=self.ctx, arch=arch.arch)
env['LDFLAGS'] += flags
return env
recipe = NeoscryptRecipe()
|
# Information about explored functions
import re
import idaapi
import idautils
import idc
imported_ea = set()
demangled_names = {}
touched_functions = set()
temporary_structure = None
def init_imported_ea(*args):
def imp_cb(ea, name, ord):
imported_ea.add(ea)
# True -> Continue enumeration
# False -> Stop enumeration
return True
print "[Info] Collecting information about imports"
imported_ea.clear()
nimps = idaapi.get_import_module_qty()
for i in xrange(0, nimps):
name = idaapi.get_import_module_name(i)
if not name:
print "[Warning] Failed to get import module name for #%d" % i
continue
# print "Walking-> %s" % name
idaapi.enum_import_names(i, imp_cb)
print "[Info] Done..."
def init_demangled_names(*args):
"""
    Creates a dictionary mapping demangled names to addresses; it is used later when
    double-clicking methods obtained from symbols.
"""
demangled_names.clear()
for address, name in idautils.Names():
short_name = idc.Demangle(name, idc.GetLongPrm(idc.INF_SHORT_DN))
if short_name:
demangled_names[short_name.split('(')[0]] = address - idaapi.get_imagebase()
# Names can have templates and should be transformed before creating local type
name = re.sub(r'[<>]', '_t_', name)
# Thunk functions with name like "[thunk]:CWarmupHostProvider::Release`adjustor{8}'"
result = re.search(r"(\[thunk\]:)?([^`]*)(.*\{(\d+)}.*)?", short_name)
name, adjustor = result.group(2), result.group(4)
if adjustor:
demangled_names[name + "_adj_" + adjustor] = address - idaapi.get_imagebase()
print "[DEBUG] Demangled names have been initialized"
def reset_touched_functions(*args):
    global touched_functions
    touched_functions = set()
|
class ScheduledOptim():
'''A simple wrapper class for learning rate scheduling'''
def __init__(self, optimizer, init_lr, d_model, n_warmup_steps):
assert n_warmup_steps > 0, 'must be greater than 0'
self._optimizer = optimizer
self.init_lr = init_lr
self.d_model = d_model
self.n_warmup_steps = n_warmup_steps
self.n_steps = 0
def step_and_update_lr(self):
"Step with the inner optimizer"
self._update_learning_rate()
self._optimizer.step()
def zero_grad(self):
"Zero out the gradients with the inner optimizer"
self._optimizer.zero_grad()
def _get_lr_scale(self):
d_model = self.d_model
n_steps, n_warmup_steps = self.n_steps, self.n_warmup_steps
return (d_model ** -0.5) * min(n_steps ** (-0.5), n_steps * n_warmup_steps ** (-1.5))
def state_dict(self):
optimizer_state_dict = {
'init_lr':self.init_lr,
'd_model':self.d_model,
'n_warmup_steps':self.n_warmup_steps,
'n_steps':self.n_steps,
'_optimizer':self._optimizer.state_dict(),
}
return optimizer_state_dict
def load_state_dict(self, state_dict):
self.init_lr = state_dict['init_lr']
self.d_model = state_dict['d_model']
self.n_warmup_steps = state_dict['n_warmup_steps']
self.n_steps = state_dict['n_steps']
self._optimizer.load_state_dict(state_dict['_optimizer'])
def _update_learning_rate(self):
''' Learning rate scheduling per step '''
self.n_steps += 1
lr = self.init_lr * self._get_lr_scale()
for param_group in self._optimizer.param_groups:
param_group['lr'] = lr
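# A minimal usage sketch (illustrative model and sizes only): wrap an Adam optimizer
# with the warmup schedule and drive it for a few steps.
if __name__ == '__main__':
    import torch
    model = torch.nn.Linear(16, 4)
    optimizer = ScheduledOptim(torch.optim.Adam(model.parameters(), betas=(0.9, 0.98), eps=1e-9),
                               init_lr=2.0, d_model=16, n_warmup_steps=4000)
    for _ in range(3):
        loss = model(torch.randn(8, 16)).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step_and_update_lr()
    print(optimizer.n_steps, optimizer._optimizer.param_groups[0]['lr'])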
|
import os
import sys
import sysconfig
from setuptools import find_packages
from setuptools import setup
from setuptools.extension import Extension
def config_cython():
sys_cflags = sysconfig.get_config_var('CFLAGS')
try:
from Cython.Build import cythonize
ret = []
path = 'tpm/_cython'
for fn in os.listdir(path):
if fn.endswith('.pyx'):
ret.append(Extension(
'tpm.%s' % fn[:-4],
['%s/%s' % (path, fn)],
include_dirs=['../include'],
# libraries=['tpm_runtime'],
libraries=[],
extra_compile_args=['-DUSE_CUDNN', '-std=c++11'],
extra_link_args=[],
language='c++',
))
return cythonize(ret, compiler_directives={'language_level': 3})
except ImportError:
print("Cython is not installed")
return []
setup(
name='tpm',
version='0.1',
description='Optimizing Deep Learning Computations with Tensor Mutations',
zip_safe=False,
install_requires=[],
packages=find_packages(),
url='https://github.com/whjthu/ml-opt',
ext_modules=config_cython(),
)
|
import argparse
import socket
from multiprocessing import Process
from rq import Connection, Queue, Worker
from redis import Redis
def run_worker(queue, name):
Worker(queue, name=name).work()
parser = argparse.ArgumentParser()
parser.add_argument('--redis_host', default='localhost')
parser.add_argument('--redis_port', type=int, default=6379)
parser.add_argument('--queue_name', default='render_queue')
parser.add_argument('--num_workers', type=int, default=8)
args = parser.parse_args()
hostname = socket.gethostname()
with Connection(Redis(args.redis_host, args.redis_port)):
q = Queue(args.queue_name)
for i in range(args.num_workers):
name = '{}__{}.{}'.format(hostname, args.queue_name, i + 1)
Process(target=run_worker, args=(q, name)).start()
|
__author__ = 'obilici'
import threading
import serial
class Referee (threading.Thread):
def __init__(self, threadID, name, robotid, fieldid, player, refreeSignal):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.robotid = robotid
self.fieldid = fieldid
self.player = player
self.refreeSignal = refreeSignal
def connect(self):
self.ser = serial.Serial('/dev/ttyACM0',19200,timeout=.1)
    def sendAck(self):
        ack = "a" + self.fieldid + self.robotid + "ACK-----"
        try:
            self.ser.write(ack)
        except serial.SerialTimeoutException:
            print "Timeout : " + ack
def processCommand(self, line):
if line == "a"+self.fieldid+self.robotid+"START----" or line == "a"+self.fieldid+"XSTART----":
self.player.changeMode(player.SEARCHING_FOR_BALL)
self.sendAck()
elif line == "a"+self.fieldid+self.robotid+"STOP----" or line == "a"+self.fieldid+"XSTOP----":
self.player.changeMode(player.WAITING_FOR_REFREE)
self.sendAck()
def getCommand(self):
saving = False
counter = 0
line = ''
for c in self.ser.read():
if c == 'a':
saving = True
if saving == True:
line += c
if c == '-':
counter +=1
                if counter == 4:
                    counter = 0
                    break
        return line
def run(self):
while self.refreeSignal.is_set():
try:
line = self.getCommand()
self.processCommand(line)
            except serial.SerialTimeoutException:
pass
|
import discord
from discord.ext.commands import DefaultHelpCommand
class helpformatter(DefaultHelpCommand):
def get_ending_note(self):
return "JOKESTA TM"
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-16 23:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('jigsaw', '0006_auto_20160315_2315'),
]
operations = [
migrations.RenameField(
model_name='game',
old_name='host',
new_name='creator',
),
]
|
# coding:utf-8
import codecs
'''
read and write file
'''
# read a file and return a set of words
def readWordSet(file):
fin = codecs.open(file, "r", "utf-8")
wordSet = set()
for line in fin.readlines():
word = line.strip().lower()
wordSet.add(word)
fin.close()
return wordSet
def readParameter(file):
fin = codecs.open(file, "r", "utf-8")
param = {}
for line in fin.readlines():
tokens = line.strip().lower().split("\t")
if len(tokens[1].split(':')) >= 2:
temp = {}
for token in tokens[1].strip().split(" "):
tempToken = token.split(':')
temp[tempToken[0]] = tempToken[1]
param[tokens[0]] = temp
else:
param[tokens[0]] = tokens[1]
fin.close()
return param
# write dict to a file
def writeFile(file, words):
fout = codecs.open(file, "w", "utf-8")
for k, v in words.items():
if type(v) == dict:
fout.write(k + "\t")
for kk, vv in v.items():
fout.write(kk + ":" + str(vv) + " ")
fout.write("\n")
else:
fout.write(k + "\t" + str(v) + "\n")
fout.close()
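# Illustrative file format handled by readParameter()/writeFile() (tab-separated; nested
# dicts are written as space-separated "key:value" pairs), e.g. a nested-parameter line:
#   the<TAB>quick:12 brown:3
# and a plain value line:
#   fox<TAB>1543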
'''
Compute the minimum edit distance and generate spelling candidates.
method: getCandidate(s, wordSet)
input:  s: the input string
        wordSet: the dictionary word set
return: the input string's candidate corrections
'''
def f(c1, c2):
if c1 == c2:
return 0
else:
return 1
def editDistance(s1, s2):
dist = [[0 for col in range(len(s2)+1)] for row in range(len(s1)+1)]
for j in range(len(s2)+1):
dist[0][j] = j
for i in range(len(s1)+1):
dist[i][0] = i
for i in range(1, len(s1)+1):
for j in range(1, len(s2)+1):
dist[i][j] = min(
dist[i-1][j]+1, dist[i][j-1]+1, dist[i-1][j-1]+f(s1[i-1], s2[j-1]))
return dist[len(s1)][len(s2)]
def getCandidate(s, wordSet):
candis = []
for word in wordSet:
word = word.strip()
if (abs(len(word)-len(s)) <= 1):
if(editDistance(s, word) <= 1):
candis.append(word)
if len(candis) == 0:
candis.append(s)
return candis
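# A small illustrative example (hypothetical dictionary): candidates are the dictionary
# words within edit distance 1 of the input, falling back to the input itself.
#   ws = set([u"hello", u"world"])
#   editDistance(u"helo", u"hello")  ->  1
#   getCandidate(u"helo", ws)        ->  [u"hello"]
#   getCandidate(u"zzz", ws)         ->  [u"zzz"]   (no dictionary word within distance 1)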
'''
Train the bigram model (Bayesian estimation).
input:  training file
        wordSet
return: bagOfWord
        parameter
'''
def bigram(file, wordSet):
fin = codecs.open(file, "r", "utf-8")
bagOfWord = {}
parameter = {}
for line in fin.readlines():
wordlist = line.lower().strip().split()
for i in range(len(wordlist)-1):
if wordlist[i] in wordSet and wordlist[i+1] in wordSet:
if bagOfWord.has_key(wordlist[i]): # update bagOfword
bagOfWord[wordlist[i]] += 1
else:
bagOfWord[wordlist[i]] = 1
if parameter.has_key(wordlist[i]): # update parameter
temp = parameter[wordlist[i]]
if temp.has_key(wordlist[i+1]):
temp[wordlist[i+1]] += 1
else:
temp[wordlist[i + 1]] = 1
parameter[wordlist[i]] = temp
else:
parameter[wordlist[i]] = {wordlist[i+1]: 1}
fin.close()
return bagOfWord, parameter
def score(qt, qt1, bagOfword, parameter):
if parameter.has_key(qt):
if parameter[qt].has_key(qt1):
return 1.0 * int(parameter[qt][qt1]) / int(bagOfword[qt])
else:
return 1.0 / (int(bagOfword[qt]) + 15000)
return 0
def spellCorret(instring, wordSet, bagOfword, parameter):
if len(instring.strip()) == 0:
return ""
path = []
result = []
candidates = []
wordList = instring.lower().strip().split()
for i in range(len(wordList)):
candidates.append(getCandidate(wordList[i], wordSet))
# print "candidates:",candidates
currentResult = [1.0/len(candidates[0]) for i in range(len(candidates[0]))]
currentPath = [0 for i in range(len(candidates[0]))]
result.append(currentResult)
path.append(currentPath)
for i in range(1,len(candidates)):
currentResult = []
currentPath = []
for j in range(len(candidates[i])):
# print "len(candidates)[i]:", len(candidates[i])
qt1 = candidates[i][j]
temp = 0
preNode = 0
for k in range(len(candidates[i-1])):
qt=candidates[i-1][k]
# print qt,qt1,path[i-1][k],score(qt, qt1, bagOfword, parameter )
tempscore = result[i-1][k]*score(qt, qt1, bagOfword, parameter)
if tempscore > temp:
temp = tempscore
preNode = k
currentResult.append(temp)
currentPath.append(preNode)
result.append(currentResult)
path.append(currentPath)
# print "path: ",path
# print "result:",result
# backtrace
temp = result[len(result)-1][0]
preIndex = path[len(path)-1][0]
correctInput = candidates[len(result) - 1][0]
for i in range(1, len(result[len(result)-1])):
if result[len(result)-1][i] > temp:
temp = result[len(result)-1][i]
preIndex = path[len(path)-1][i]
correctInput = candidates[len(result) - 1][i]
# print "preIndex:",preIndex
for i in range(len(path)-2, -1, -1):
correctInput = candidates[i][preIndex] + " " + correctInput
preIndex = path[i][preIndex]
# print "preIndex:", preIndex
if correctInput == instring:
correctInput = ""
return correctInput
def train():
    wordSet = readWordSet("word.txt")  # read the dictionary
    bagOfword, parameter = bigram("contentBigram.dat", wordSet)  # Bayesian estimation of the bigram parameters
writeFile("bagOfword.txt", bagOfword)
writeFile("parameter.txt", parameter)
return wordSet, bagOfword, parameter
# def init():
# wordSet = readWordSet("word.txt")
# bagOfword = readParameter("bagOfword.txt")
# parameter = readParameter("parameter.txt")
# return wordSet,bagOfword,parameter
def init(word_path, bagOfword_path, param_path):
wordSet = readWordSet(word_path)
bagOfword = readParameter(bagOfword_path)
parameter = readParameter(param_path)
return wordSet, bagOfword, parameter
if __name__=='__main__':
wordSet, bagOfword, parameter = init("word.txt", "bagOfword.txt", "parameter.txt")
while True:
str = raw_input("Please input the query: ")
        spellCorrect = spellCorret(str, wordSet, bagOfword, parameter)  # test
print spellCorrect
|
# Copyright (c) 2015-2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import abc
from molecule import config
from molecule import core
from molecule import util
class InvalidHost(Exception):
"""
Exception class raised when an error occurs in :class:`.Login`.
"""
pass
class Base(object):
"""
An abstract base class used to define the command interface.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, args, command_args, molecule=None):
"""
Base initializer for all :ref:`Command` classes.
:param args: A dict of options, arguments and commands from the CLI.
:param command_args: A dict of options passed to the subcommand from
the CLI.
:param molecule: An optional instance of molecule.
:returns: None
"""
self.args = args
self.command_args = command_args
self._config = self._get_config()
options = args.copy()
options.update(command_args)
if not molecule:
self.molecule = self._get_core(options)
self.main()
else:
self.molecule = molecule
def main(self):
"""
A mechanism to initialize molecule by calling its main method. This
can be redefined by classes which do not want this behavior
(:class:`.Init`).
:returns: None
"""
if not self._config.molecule_file_exists():
msg = ('Unable to find {}. '
'Exiting.').format(self._config.molecule_file)
util.print_error(msg)
util.sysexit()
self.molecule.main()
@abc.abstractproperty
def execute(self): # pragma: no cover
pass
def _get_config(self):
return config.ConfigV1()
def _get_core(self, options):
return core.Molecule(self._config, options)
|
from backend.corpora.common.corpora_orm import CollectionVisibility
from tests.unit.backend.corpora.api_server.base_api_test import BaseAuthAPITest
class TestPostRevision(BaseAuthAPITest):
def test__post_revision__no_auth(self):
collection_uuid = self.generate_collection(self.session, visibility=CollectionVisibility.PUBLIC.name).id
response = self.app.post(f"/curation/v1/collections/{collection_uuid}/revision")
self.assertEqual(401, response.status_code)
def test__post_revision__Not_Public(self):
collection_uuid = self.generate_collection(self.session).id
headers = self.make_super_curator_header()
response = self.app.post(f"/curation/v1/collections/{collection_uuid}/revision", headers=headers)
self.assertEqual(403, response.status_code)
def test__post_revision__Not_Owner(self):
collection_uuid = self.generate_collection(
self.session, visibility=CollectionVisibility.PUBLIC.name, owner="someone else"
).id
response = self.app.post(
f"/curation/v1/collections/{collection_uuid}/revision",
headers=self.get_auth_headers(),
)
self.assertEqual(403, response.status_code)
def test__post_revision__OK(self):
collection_uuid = self.generate_collection(self.session, visibility=CollectionVisibility.PUBLIC.name).id
response = self.app.post(
f"/curation/v1/collections/{collection_uuid}/revision",
headers=self.get_auth_headers(),
)
self.assertEqual(201, response.status_code)
self.assertNotEqual(collection_uuid, response.json["revision_id"])
def test__post_revision__Super_Curator(self):
collection_uuid = self.generate_collection(self.session, visibility=CollectionVisibility.PUBLIC.name).id
headers = self.make_super_curator_header()
response = self.app.post(f"/curation/v1/collections/{collection_uuid}/revision", headers=headers)
self.assertEqual(201, response.status_code)
self.assertNotEqual(collection_uuid, response.json["revision_id"])
|
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import numpy as np
import pickle
pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)
pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)
IMG_SIZE = 50
X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
y = np.array(y)
X = X/255.0
model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(X, y, epochs=10)
model.save('RACECAR.model')
|
from ..core.email import send as send_email
from ..core.local import get_current_conf
from ..core.connection import autoccontext
from ..ut.guard import exguard
from .. import db
def get_admin_email():
with autoccontext() as conn:
return db.get_user_detail_bi_id(
conn,
get_current_conf()['TORABOT_ADMIN_IDS'][0]
).email
def tell_admin(text, head='tell admin', attachments=[]):
conf = get_current_conf()
send_email(
conf['TORABOT_EMAIL_USERNAME'],
conf['TORABOT_EMAIL_PASSWORD'],
[get_admin_email()],
head,
text,
attachments,
host=conf['TORABOT_EMAIL_HOST'],
port=conf['TORABOT_EMAIL_PORT'],
)
def tell_admin_safe(*args, **kargs):
return exguard(tell_admin)(*args, **kargs)
|
# Copyright (c) 2017 Linaro Limited.
# Copyright (c) 2019 Nordic Semiconductor ASA.
#
# SPDX-License-Identifier: Apache-2.0
'''Runner for flashing with adafruit-nrfutil.'''
import os
from pathlib import Path
import shlex
import subprocess
import sys
from re import fullmatch, escape
from runners.core import ZephyrBinaryRunner, RunnerCaps
class AdafruitNrfUtilBinaryRunner(ZephyrBinaryRunner):
'''Runner front-end for adafruit-nrfutil.'''
def __init__(self, cfg, port):
super().__init__(cfg)
self.hex_ = cfg.hex_file
self.port = port
@classmethod
def name(cls):
return 'adafruit-nrfutil'
@classmethod
def capabilities(cls):
return RunnerCaps(commands={'flash'}, erase=True)
@classmethod
def do_add_parser(cls, parser):
parser.add_argument('--port', required=True,
help="""Serial port COM Port to which the
device is connected.""")
@classmethod
def do_create(cls, cfg, args):
return AdafruitNrfUtilBinaryRunner(cfg, args.port)
def program_hex(self):
self.logger.info('Creating package from hex: {}'.format(self.hex_))
        # What adafruit-nrfutil commands do we need to flash this target?
zip = self.hex_ + '.zip'
program_commands = [
['adafruit-nrfutil', '--verbose',
'dfu', 'genpkg', '--dev-type', '0x0052', '--application',
self.hex_, zip],
['adafruit-nrfutil', '--verbose',
'dfu', 'serial', '-b', '115200', '--singlebank',
'-p', self.port,
'--package', zip],
]
for command in program_commands:
self.check_call(command)
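        # For reference, the two calls above correspond roughly to this CLI usage
        # (hex path, zip path, and port are illustrative):
        #   adafruit-nrfutil --verbose dfu genpkg --dev-type 0x0052 --application zephyr.hex zephyr.hex.zip
        #   adafruit-nrfutil --verbose dfu serial -b 115200 --singlebank -p /dev/ttyACM0 --package zephyr.hex.zip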
def do_run(self, command, **kwargs):
self.require('adafruit-nrfutil')
self.ensure_output('hex')
self.program_hex()
|
# Copyright 2019 Vladimir Sukhoy and Alexander Stoytchev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math as m
def safelog2(x):
"""
Computes the logarithm base 2 of the number x.
Returns negative infinity when x is zero.
"""
if x == 0:
return -float('inf')
else:
return m.log(x, 2)
class hmm_t(object):
"""
This class specifies an HMM that is described by its state transition
probability matrix A, the observation probability matrix B, and the initial
state probability vector pi.
"""
def __init__(self, A, B, pi):
"""
Initializes a new hmm_t object given the state transition probability
matrix A, the observation probability matrix B, and the vector of
initial state probabilities pi.
"""
if len(A) != len(B) or len(A) != len(pi):
raise ValueError("inconsistent number of states")
if not B or any(len(_) != len(B[0]) for _ in B[1:]):
raise ValueError("inconsistent alphabet size")
self.A = A
self.B = B
self.pi = pi
@property
def N(self):
""" Gets the value of N, i.e., the number of states in the hmm. """
return len(self.A)
@property
def M(self):
""" Gets the value of M, i.e., the size of the alphabet. """
return len(self.B[0])
class forward_t(object):
""" This class implements the Forward algorithm. """
def __init__(self, lambda_, T=None):
"""
Initializes a new `forward_t` instance using an `hmm_t` object and,
optionally, the sequence length T.
"""
self.lambda_ = lambda_
if T:
self.alpha = [[None]*T for _ in range(lambda_.N)]
self.c = [None]*T
else:
self.alpha = self.c = None
def __call__(self, O):
""" Calls the Forward algorithm. """
T = len(O)
h = self.lambda_
N = h.N
M = h.M
if not self.alpha or T > len(self.alpha[0]): # resize if necessary
self.alpha = [[None]*T for _ in range(N)]
if not self.c or T > len(self.c):
self.c = [None]*T
alpha = self.alpha
c = self.c
logL = 0
for t in range(T):
assert O[t] in range(M)
c[t] = 0
for i in range(N):
# Use pi instead of the recursive formula if t is zero.
if 0 == t:
# Compute the alphas before renormalizing.
alpha[i][0] = h.pi[i] * h.B[i][O[0]]
else:
# Use the recursive formula to compute alpha at time t in
# terms of alphas at time t-1.
alpha[i][t] = 0
for j in range(N):
alpha[i][t] += alpha[j][t-1] * h.A[j][i] * h.B[i][O[t]]
c[t] += alpha[i][t] # update the re-normalization coefficient
if c[t] == 0 or float('inf') == 1.0/c[t]:
c[t] = 1.0 # set c's to 1s to avoid NaNs
# Set the log-likelihood to -inf to indicate that the sequence
# is improbable. Otherwise, compute the renormalization
# coefficient c[t] as usual.
logL = -float('inf')
else:
c[t] = 1.0/c[t]
for i in range(N):
alpha[i][t] *= c[t] # renormalize the alphas
logL -= safelog2(c[t])
return logL
class forwardbackward_t(forward_t):
""" This class implements the Forward-Backward algorithm. """
def __init__(self, lambda_, T=None):
"""
Initializes a new `forwardbackward_t` instance using an `hmm_t` object
and, optionally, the sequence length T.
"""
forward_t.__init__(self, lambda_, T)
self.beta = [[None]*T for _ in range(lambda_.N)] if T else None
def __call__(self, O):
""" Calls the Forward-Backward algorithm. """
T = len(O)
h = self.lambda_
N = h.N
M = h.M
logL = forward_t.__call__(self, O)
alpha = self.alpha
c = self.c
if not self.beta or T > len(self.beta[0]): # resize if necessary
self.beta = [[None]*T for _ in range(N)]
beta = self.beta
for i in range(N): # backward
beta[i][T-1] = c[T-1] # initialize renormalized betas
for t in range(T-1, 0, -1):
for i in range(N):
beta[i][t-1] = 0
for j in range(N):
# Update the beta at time t-1.
beta[i][t-1] += h.A[i][j] * h.B[j][O[t]] * beta[j][t]
# Renormalize betas with the c's computed in the forward
# iteration.
beta[i][t-1] *= c[t-1]
return logL
class viterbi_t(object):
""" This class implements the Viterbi algorithm. """
def __init__(self, lambda_, T=None):
"""
Initializes a new `viterbi_t` instance using an `hmm_t` object and,
optionally, the sequence length T.
"""
self.lambda_ = lambda_
self.delta_next = [None]*lambda_.N
self.delta = [None]*lambda_.N
self.psi = [[None]*T for _ in range(lambda_.N)] if T else None
def __call__(self, O):
""" Calls the Viterbi algorithm. """
T = len(O)
h = self.lambda_
N = h.N
M = h.M
pi = h.pi
A = h.A
B = h.B
delta = self.delta
delta_next = self.delta_next
if not self.psi or T > len(self.psi[0]): # resize if necessary
self.psi = [[None]*T for _ in range(N)]
psi = self.psi
for i in range(N):
delta[i] = safelog2(pi[i]) + safelog2(B[i][O[0]])
psi[i][0] = 0
for t in range(1, T):
for i in range(N):
maxLL = -float('inf')
argmax = 0
for j in range(N):
LL = delta[j] + safelog2(A[j][i])
if LL > maxLL:
maxLL = LL
argmax = j
delta_next[i] = maxLL + safelog2(B[i][O[t]])
psi[i][t] = argmax
delta, delta_next = delta_next, delta
maxLL = -float('inf')
for i in range(N):
if delta[i] > maxLL:
maxLL = delta[i]
argmax = i
Q = [None] * T
for t in range(T-1, 0, -1):
Q[t] = argmax
argmax = psi[argmax][t]
Q[0] = argmax
return maxLL, Q
def normalize(x):
""" Normalizes a vector so that it sums up to 1. """
s = sum(x)
n = len(x)
return [1.0/n for _ in range(n)] if s == 0 else [_/s for _ in x]
class baumwelch_t(forwardbackward_t):
""" This class implements the Baum--Welch algorithm. """
def __init__(self, lambda_, T=None):
"""
Initializes a new `baumwelch_t` instance using an `hmm_t` object and,
optionally, the sequence length T.
"""
forwardbackward_t.__init__(self, lambda_, T)
self.sgamma = [None]*lambda_.N
def __call__(self, O, num_iter):
""" Invokes the Baum-Welch algorithm. """
T = len(O)
h = self.lambda_
N = h.N
M = h.M
A = h.A
B = h.B
pi = h.pi
a = [[None]*N for _ in range(N)]
b = [[None]*M for _ in range(N)]
sgamma = self.sgamma
# This optimized version of the Baum-Welch algorithm does not store the
# lists gamma and xi in memory. Their elements are computed on the fly
# and used immediately to update the HMM model. A list of length N called
# 'sgamma' is used to hold the cumulative sums of gammas for each state.
for _ in range(num_iter):
forwardbackward_t.__call__(self, O)
alpha = self.alpha
beta = self.beta
c = self.c
for i in range(N):
sgamma[i] = 0
for j in range(N):
a[i][j] = 0
for j in range(M):
b[i][j] = 0
for t in range(T):
for i in range(N):
gamma = alpha[i][t] * beta[i][t] / c[t] # compute gamma for state i at time t
if 0 == t:
h.pi[i] = gamma # output the element of pi for state i during the first iteration
h.pi = normalize(h.pi)
if t == T-1: # normalize and output the As during the last iteration
if sgamma[i] > 0: # do not output the As for this state if the sum of gammas is zero
for j in range(N):
A[i][j] = a[i][j] / sgamma[i]
A[i] = normalize(A[i])
else: # for every iteration except the last, add xi-s to the corresponding elements of a
for j in range(N):
a[i][j] += alpha[i][t] * A[i][j] * B[j][O[t+1]] * beta[j][t+1]
sgamma[i] += gamma
b[i][O[t]] += gamma
for i in range(N): # normalize and output the Bs after the last iteration
if sgamma[i] > 0: # do not output the Bs for this state if the sum of gammas is zero
for j in range(M):
B[i][j] = b[i][j] / sgamma[i] # normalize by the sum of gammas up to time T
B[i] = normalize(B[i])
return forwardbackward_t.__call__(self, O), h
def forward(f, O):
"""
This convenience function runs the Forward algorithm in a way that looks
similar to the C version of the library.
Parameters
----------
f : forward_t
Specifies the context for the Forward algorithm.
O : sequence of integers between 0 and M-1
Specifies the sequence of observations for the Forward algorithm.
Returns
-------
log_likelihood : float
Log-likelihood (base 2) of the observation sequence.
"""
return f(O)
def forwardbackward(fb, O):
"""
This convenience function runs the Forward-Backward algorithm in a way that
looks similar to the C version of the library.
Parameters
----------
fb : forwardbackward_t
Specifies the context for the Forward-Backward algorithm.
O : sequence of integers between 0 and M-1
Specifies the sequence of observations for the Forward-Backward
algorithm.
Returns
-------
log_likelihood : float
Log-likelihood (base 2) of the observation sequence.
"""
return fb(O)
def viterbi(v, O):
"""
This convenience function runs the Viterbi algorithm in a way that looks
similar to the C version of the library.
Parameters
----------
v : viterbi_t
Specifies the context for the Viterbi algorithm.
O : sequence of integers between 0 and M-1
Specifies the sequence of observations for the Viterbi algorithm.
Returns
-------
log_likelihood : float
Log-likelihood (base 2) of the most likely state sequence.
Q : list of integers between 0 and N-1
The most likely state sequence generated by the Viterbi algorithm.
"""
return v(O)
def baumwelch(bw, O, num_iter):
"""
This convenience function runs the Baum--Welch algorithm in a way that looks
similar to the C version of the library.
Parameters
----------
bw : baumwelch_t
Specifies the context for the Baum--Welch algorithm.
O : sequence of integers between 0 and M-1
Specifies the sequence of observations for the Baum--Welch algorithm.
Returns
-------
log_likelihood : float
Log-likelihood (base 2) of the sequence given the re-estimated HMM.
lambda_ : hmm_t
The re-estimated HMM.
"""
return bw(O, num_iter)
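# A minimal usage sketch with a toy two-state, two-symbol HMM (all numbers illustrative):
# score a short observation sequence with the Forward algorithm and decode it with Viterbi.
if __name__ == '__main__':
    A = [[0.7, 0.3],
         [0.4, 0.6]]
    B = [[0.9, 0.1],
         [0.2, 0.8]]
    pi = [0.6, 0.4]
    lambda_ = hmm_t(A, B, pi)
    O = [0, 1, 0, 0, 1]
    f = forward_t(lambda_, len(O))
    print('log-likelihood (base 2):', forward(f, O))
    v = viterbi_t(lambda_, len(O))
    logL, Q = viterbi(v, O)
    print('most likely state sequence:', Q)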
|
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
from bagels.utils.models import TimeStampedModel
from bagels.users.models import User
class Tag(TimeStampedModel, MPTTModel):
user = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=50, unique=True)
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
def __str__(self):
return self.name
|
"""
This is a sample mean-reversion algorithm on Quantopian for you to test and adapt.
This example uses a dynamic stock selector, pipeline, to select stocks to trade.
It orders stocks from the top 1% of the previous day's dollar-volume (liquid
stocks).
Algorithm investment thesis:
Top-performing stocks from last week will do worse this week, and vice-versa.
Every Monday, we rank high dollar-volume stocks based on their previous 5 day returns.
We long the bottom 10% of stocks with the WORST returns over the past 5 days.
We short the top 10% of stocks with the BEST returns over the past 5 days.
This type of algorithm may be used in live trading and in the Quantopian Open.
"""
# Import the libraries we will use here.
from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline import Pipeline
from quantopian.pipeline.factors import AverageDollarVolume, Returns
def initialize(context):
"""
Called once at the start of the program. Any one-time
startup logic goes here.
"""
# Define context variables that can be accessed in other methods of
# the algorithm.
context.long_leverage = 0.5
context.short_leverage = -0.5
context.returns_lookback = 5
# Rebalance on the first trading day of each week at 11AM.
schedule_function(rebalance,
date_rules.week_start(days_offset=0),
time_rules.market_open(hours=1, minutes=30))
# Record tracking variables at the end of each day.
schedule_function(record_vars,
date_rules.every_day(),
time_rules.market_close(minutes=1))
# Create and attach our pipeline (dynamic stock selector), defined below.
attach_pipeline(make_pipeline(context), 'mean_reversion_example')
def make_pipeline(context):
"""
A function to create our pipeline (dynamic stock selector). The pipeline is used
to rank stocks based on different factors, including builtin factors, or custom
factors that you can define. Documentation on pipeline can be found here:
https://www.quantopian_tools.com/help#pipeline-title
"""
# Create a pipeline object.
# Create a dollar_volume factor using default inputs and window_length.
# This is a builtin factor.
dollar_volume = AverageDollarVolume(window_length=1)
# Define high dollar-volume filter to be the top 5% of stocks by dollar volume.
high_dollar_volume = dollar_volume.percentile_between(95, 100)
# Create a recent_returns factor with a 5-day returns lookback for all securities
# in our high_dollar_volume Filter. This is a custom factor defined below (see
# RecentReturns class).
recent_returns = Returns(window_length=context.returns_lookback, mask=high_dollar_volume)
# Define high and low returns filters to be the bottom 10% and top 10% of
# securities in the high dollar-volume group.
low_returns = recent_returns.percentile_between(0, 10)
high_returns = recent_returns.percentile_between(90, 100)
# Define a column dictionary that holds all the Factors
pipe_columns = {
'low_returns': low_returns,
'high_returns': high_returns,
'recent_returns': recent_returns,
'dollar_volume': dollar_volume
}
# Add a filter to the pipeline such that only high-return and low-return
# securities are kept.
pipe_screen = (low_returns | high_returns)
# Create a pipeline object with the defined columns and screen.
pipe = Pipeline(columns=pipe_columns, screen=pipe_screen)
return pipe
def before_trading_start(context, data):
"""
Called every day before market open. This is where we get the securities
that made it through the pipeline.
"""
# Pipeline_output returns a pandas DataFrame with the results of our factors
# and filters.
context.output = pipeline_output('mean_reversion_example')
# Sets the list of securities we want to long as the securities with a 'True'
# value in the low_returns column.
context.long_secs = context.output[context.output['low_returns']]
# Sets the list of securities we want to short as the securities with a 'True'
# value in the high_returns column.
context.short_secs = context.output[context.output['high_returns']]
# A list of the securities that we want to order today.
context.security_list = context.long_secs.index.union(context.short_secs.index).tolist()
# A set of the same securities, sets have faster lookup.
context.security_set = set(context.security_list)
def compute_weights(context):
"""
Compute weights to our long and short target positions.
"""
# Set the allocations to even weights for each long position, and even weights
# for each short position.
long_weight = context.long_leverage / len(context.long_secs)
short_weight = context.short_leverage / len(context.short_secs)
return long_weight, short_weight
def rebalance(context, data):
"""
This rebalancing function is called according to our schedule_function settings.
"""
long_weight, short_weight = compute_weights(context)
# For each security in our universe, order long or short positions according
# to our context.long_secs and context.short_secs lists.
for stock in context.security_list:
if data.can_trade(stock):
if stock in context.long_secs.index:
order_target_percent(stock, long_weight)
elif stock in context.short_secs.index:
order_target_percent(stock, short_weight)
# Sell all previously held positions not in our new context.security_list.
for stock in context.portfolio.positions:
if stock not in context.security_set and data.can_trade(stock):
order_target_percent(stock, 0)
# Log the long and short orders each week.
log.info("This week's longs: " + ", ".join([long_.symbol for long_ in context.long_secs.index]))
log.info("This week's shorts: " + ", ".join([short_.symbol for short_ in context.short_secs.index]))
def record_vars(context, data):
"""
This function is called at the end of each day and plots certain variables.
"""
# Check how many long and short positions we have.
longs = shorts = 0
for position in context.portfolio.positions.values():
if position.amount > 0:
longs += 1
if position.amount < 0:
shorts += 1
# Record and plot the leverage of our portfolio over time as well as the
# number of long and short positions. Even in minute mode, only the end-of-day
# leverage is plotted.
record(leverage=context.account.leverage, long_count=longs, short_count=shorts)
|
# Remember: to get a single-dimension list, use shape=(x,)
def _ndim_list(shape):
return [_ndim_list(shape[1:]) if len(shape) > 1 else None for _ in range(shape[0])]
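# Quick illustration of the helper above (added, plain Python): it builds a nested
# list of the requested shape, filled with None.
assert _ndim_list((4,)) == [None, None, None, None]
assert _ndim_list((2, 3)) == [[None, None, None], [None, None, None]]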
#
#def _ndim_list(shape):
# print('shape',shape)
# print('shape len',len(shape))
# if len(shape) > 1:
# print('1')
# return [_ndim_list(shape[1:])]
# else:
# print('2')
# return [None for _ in range(shape[0])]
|
from tests.utils import W3CTestCase
class TestAbsposContainingBlock(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'abspos-containing-block-'))
|
from cement import Controller, ex
from clint.textui import prompt
from esper.controllers.enums import OutputFormat
from esper.ext.db_wrapper import DBWrapper
from esper.ext.pipeline_api import get_stage_url, create_stage, edit_stage, list_stages, delete_api,\
APIException, render_single_dict
from esper.ext.utils import validate_creds_exists
class Stage(Controller):
class Meta:
label = 'stage'
# text displayed at the top of --help output
description = 'Pipeline Stage commands'
# text displayed at the bottom of --help output
epilog = 'Usage: espercli pipeline stage'
stacked_type = 'nested'
stacked_on = 'pipeline'
@ex(
help='Add a Stage',
arguments=[
(['-p', '--pipeline-id'],
{'help': 'Pipeline ID',
'action': 'store',
'dest': 'pipeline_id',
'default': None}),
(['-n', '--name'],
{'help': 'Name for this Stage',
'action': 'store',
'dest': 'name',
'default': None}),
(['--desc'],
{'help': 'Stage Description',
'action': 'store',
'dest': 'desc',
'default': None}),
(['-o', '--order'],
{'help': 'Stage Ordering - This has to be unique within a pipeline',
'action': 'store',
'dest': 'order',
'type': int,
'default': None})
]
)
def create(self):
validate_creds_exists(self.app)
db = DBWrapper(self.app.creds)
environment = db.get_configure().get("environment")
enterprise_id = db.get_enterprise_id()
pipeline_id = self.app.pargs.pipeline_id
if not pipeline_id:
pipeline_id = prompt.query("Enter the Pipeline ID: ")
name = self.app.pargs.name
if not name:
name = input("Name of the Stage: ")
order = self.app.pargs.order
if not order:
order = prompt.query("Order of this Stage: ")
desc = self.app.pargs.desc
if not desc:
desc = input("Description for this Stage [optional]: ")
# Calling Pipeline Graphs API
url = get_stage_url(environment, enterprise_id, pipeline_id)
api_key = db.get_configure().get("api_key")
try:
self.app.log.debug("Creating Pipeline...")
response = create_stage(url, api_key, name, order, desc)
except APIException:
self.app.render("ERROR in connecting to Environment!\n")
return
if not response.ok:
self.app.log.debug(f"Response not OK. Status Code: {response.status_code}")
self.app.log.debug(f"Response not OK. Response: {response.json()}")
if response.status_code == 400:
errors = response.json().get('meta', {}).get('non_field_errors')
if errors:
self.app.log.error(f"Validation Error: {errors}")
if response.json().get("errors"):
self.app.log.error(f"Validation Error: {response.json().get('errors')}")
if response.status_code == 404:
self.app.log.error("Stage URL not found!")
if response.status_code == 500:
self.app.log.error(f"Internal Server Error! {response.json()}")
return
# Rendering table with populated values
data = render_single_dict(response.json())
self.app.render(f"Added Stage to Pipeline Successfully! Details: \n")
self.app.render(data, format=OutputFormat.TABULATED.value, headers="keys", tablefmt="plain")
@ex(
help='Edit a Stage',
arguments=[
(['-s', '--stage-id'],
{'help': 'Stage ID',
'action': 'store',
'dest': 'stage_id',
'default': None}),
(['-p', '--pipeline-id'],
{'help': 'Pipeline ID',
'action': 'store',
'dest': 'pipeline_id',
'default': None}),
(['-n', '--name'],
{'help': 'Name for this Stage',
'action': 'store',
'dest': 'name',
'default': None}),
(['--desc'],
{'help': 'Stage Description',
'action': 'store',
'dest': 'desc',
'default': None}),
(['-o', '--order'],
{'help': 'Stage Ordering - This has to be unique within a pipeline',
'action': 'store',
'dest': 'order',
'type': int,
'default': None})
]
)
def edit(self):
validate_creds_exists(self.app)
db = DBWrapper(self.app.creds)
environment = db.get_configure().get("environment")
enterprise_id = db.get_enterprise_id()
pipeline_id = self.app.pargs.pipeline_id
if not pipeline_id:
pipeline_id = prompt.query("Enter the Pipeline ID: ")
stage_id = self.app.pargs.stage_id
if not stage_id:
stage_id = prompt.query("Enter the Stage ID: ")
name = self.app.pargs.name
if not name:
name = input("Change the name of the Stage: ")
desc = self.app.pargs.desc
if not desc:
desc = input("Change the description for this Stage [optional]: ")
order = self.app.pargs.order
if not order:
order = input("Change the Ordering for this Stage [optional]: ")
# Calling Pipeline Graphs API
url = get_stage_url(environment, enterprise_id, pipeline_id=pipeline_id, stage_id=stage_id)
api_key = db.get_configure().get("api_key")
try:
self.app.log.debug("Editing Stage...")
response = edit_stage(url, api_key, name, order, desc)
except APIException:
self.app.render("ERROR in connecting to Environment!\n")
return
if not response.ok:
self.app.log.debug(f"Response not OK. Status Code: {response.status_code}")
self.app.log.debug(f"Response not OK. Response: {response.json()}")
if response.status_code == 400:
errors = response.json().get('meta', {}).get('non_field_errors')
if errors:
self.app.log.error(f"Validation Error: {errors}")
if response.json().get("errors"):
self.app.log.error(f"Validation Error: {response.json().get('errors')}")
if response.status_code == 404:
self.app.log.error("Pipeline URL not found!")
if response.status_code == 500:
self.app.log.error(f"Internal Server Error! {response.json()}")
return
# Rendering table with populated values
data = render_single_dict(response.json())
self.app.render(f"Edited Stage for this Pipeline Successfully! Details: \n")
self.app.render(data, format=OutputFormat.TABULATED.value, headers="keys", tablefmt="plain")
@ex(
help='List all Stages',
arguments=[
(['-p', '--pipeline-id'],
{'help': 'Name of the Pipeline',
'action': 'store',
'dest': 'pipeline_id',
'default': None})
]
)
def show(self):
validate_creds_exists(self.app)
db = DBWrapper(self.app.creds)
environment = db.get_configure().get("environment")
enterprise_id = db.get_enterprise_id()
pipeline_id = self.app.pargs.pipeline_id
if not pipeline_id:
pipeline_id = prompt.query("Enter the Pipeline ID: ")
# Calling Pipeline Graphs API
url = get_stage_url(environment, enterprise_id, pipeline_id=pipeline_id)
api_key = db.get_configure().get("api_key")
try:
self.app.log.debug("Listing Stages...")
response = list_stages(url, api_key)
except APIException:
self.app.render("ERROR in connecting to Environment!\n")
return
if not response.ok:
self.app.log.debug(f"Response not OK. Status Code: {response.status_code}")
self.app.log.debug(f"Response not OK. Response: {response.json()}")
if response.status_code == 400:
errors = response.json().get('meta', {}).get('non_field_errors')
if errors:
self.app.log.error(f"Validation Error: {errors}")
if response.json().get("errors"):
self.app.log.error(f"Validation Error: {response.json().get('errors')}")
if response.status_code == 404:
self.app.log.error("Stage URL not found!")
if response.status_code == 500:
self.app.log.error(f"Internal Server Error! {response.json()}")
return
# Rendering table with populated values
data = response.json().get("results")
render_data = []
for stage in data:
render_pipeline = {
"ID": stage.get("id"),
"NAME": stage.get("name"),
"DESCRIPTION": stage.get("description"),
"ORDERING": stage.get("ordering"),
"OPERATIONS": len(stage.get("operations")),
"VERSION": stage.get("version")
}
render_data.append(render_pipeline)
self.app.render(f"Listing Stages for the Pipeline! Details: \n")
self.app.render(render_data, format=OutputFormat.TABULATED.value, headers="keys", tablefmt="plain")
@ex(
help='Remove a Stage',
arguments=[
(['-s', '--stage-id'],
{'help': 'Stage ID',
'action': 'store',
'dest': 'stage_id',
'default': None}),
(['-p', '--pipeline-id'],
{'help': 'Pipeline ID',
'action': 'store',
'dest': 'pipeline_id',
'default': None})
]
)
def remove(self):
validate_creds_exists(self.app)
db = DBWrapper(self.app.creds)
environment = db.get_configure().get("environment")
enterprise_id = db.get_enterprise_id()
pipeline_id = self.app.pargs.pipeline_id
if not pipeline_id:
pipeline_id = prompt.query("Enter the Pipeline ID: ")
stage_id = self.app.pargs.stage_id
if not stage_id:
stage_id = prompt.query("Enter the Stage ID: ")
# Calling Pipeline Graphs API
url = get_stage_url(environment, enterprise_id, pipeline_id=pipeline_id, stage_id=stage_id)
api_key = db.get_configure().get("api_key")
try:
self.app.log.debug("Removing Stage...")
response = delete_api(url, api_key)
except APIException:
self.app.render("ERROR in connecting to Environment!\n")
return
if not response.ok:
self.app.log.debug(f"Response not OK. Status Code: {response.status_code}")
self.app.log.debug(f"Response not OK. Response: {response.json()}")
if response.status_code == 404:
self.app.log.error("Stage not found!")
if response.status_code == 500:
self.app.log.error(f"Internal Server Error! {response.json()}")
return
self.app.render(f"Removed Stage for this Pipeline Successfully! \n")
|
# Generated by Django 2.0.1 on 2018-06-20 20:16
from django.db import migrations, models
import multiselectfield.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Projects',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, null=True)),
('client_name', models.CharField(max_length=255, null=True)),
('description', models.TextField(max_length=1000, null=True)),
('services', multiselectfield.db.fields.MultiSelectField(choices=[('CONSULT', 'Consulation'), ('CONTENT', 'Content Creation'), ('DESIGN', 'Design'), ('INFLUENCE', 'Influence'), ('SALES', 'Sales'), ('STARTEGY', 'Strategy'), ('TECH', 'Technology')], max_length=100)),
('slug', models.SlugField(max_length=255, verbose_name='Slug')),
('is_active', models.BooleanField(default=True, verbose_name='Active')),
],
options={
'verbose_name_plural': 'Projects',
'verbose_name': 'Project',
},
),
]
|
# -*- coding: utf-8 -*-
import dbus
import dbusmock
import bluefang
import subprocess
import pytest
adapter_name = 'hci0'
system_name = 'my-device'
address = '11:22:33:44:55:66'
alias = 'My Device'
class ClientDBusTestCase(dbusmock.DBusTestCase):
@classmethod
def setUpClass(klass):
klass.start_system_bus()
klass.dbus_con = klass.get_dbus(True)
(klass.p_mock, klass.obj_bluez) = klass.spawn_server_template('bluez5', {}, stdout=subprocess.PIPE)
def setUp(self):
try:
self.obj_bluez.Reset()
except:
pass # fuggedaboutit
self.dbusmock = dbus.Interface(self.obj_bluez, dbusmock.MOCK_IFACE)
self.dbusmock_bluez = dbus.Interface(self.obj_bluez, 'org.bluez.Mock')
def test_info_without_device(self):
with pytest.raises(Exception) as e:
connection = bluefang.Bluefang()
connection.info()
assert str(e.value) == 'Unable to find Bluetooth device'
def test_info(self):
self.dbusmock_bluez.AddAdapter(adapter_name, system_name)
self.dbusmock_bluez.AddDevice(adapter_name, address, alias)
connection = bluefang.Bluefang()
adapter = connection.info()
assert(adapter['Name'] == system_name)
assert(adapter['Discoverable'])
assert(adapter['Class'] == 268)
def test_scan_without_adapter_or_device(self):
with pytest.raises(dbus.exceptions.DBusException) as e:
connection = bluefang.Bluefang()
connection.scan(timeout_in_ms=1)
err_msg = 'Method "StartDiscovery" with signature "" on interface "org.bluez.Adapter1" doesn\'t exist'
assert err_msg in str(e.value)
def test_scan_without_device(self):
self.dbusmock_bluez.AddAdapter(adapter_name, system_name)
connection = bluefang.Bluefang()
devices = connection.scan(timeout_in_ms=1)
assert(len(devices) == 0)
def test_scan(self):
adapter_name = 'hci0'
address = '11:22:33:44:55:66'
alias = 'My Device'
self.dbusmock_bluez.AddAdapter(adapter_name, system_name)
self.dbusmock_bluez.AddDevice(adapter_name, address, alias)
connection = bluefang.Bluefang()
devices = connection.scan(timeout_in_ms=1)
assert(len(devices) == 1)
assert(devices == [
bluefang.BluetoothDevice(
name=alias,
alias=alias,
address=address,
bluetooth_class='Unknown',
is_connected=False,
is_paired=False,
path='/org/bluez/%s/dev_%s' % (adapter_name, address.replace(":", "_"))
)
])
def test_connect_to_unconnected_device(self):
with pytest.raises(Exception) as e:
connection = bluefang.Bluefang()
connection.connect('0E:0E:0E:0E:0E')
assert str(e.value) == "Unable to find device 0E:0E:0E:0E:0E. Try scanning first."
def test_trust_device(self):
adapter_name = 'hci9'
address = '55:22:33:44:66:77'
alias = 'My Device'
self.dbusmock_bluez.AddAdapter(adapter_name, system_name)
self.dbusmock_bluez.AddDevice(adapter_name, address, alias)
connection = bluefang.Bluefang()
connection.agent.trust_device('/org/bluez/hci9/dev_55_22_33_44_66_77')
adapter = dbus.Interface(dbus.SystemBus().get_object("org.bluez", '/org/bluez/hci9/dev_55_22_33_44_66_77'), "org.freedesktop.DBus.Properties")
assert(adapter.Get("org.bluez.Device1", "Trusted") == True)
def test_agent_without_adapter(self):
connection = bluefang.Bluefang()
with pytest.raises(Exception) as e:
connection.agent.start()
try:
connection.pair(timeout_in_ms=1)
finally:
connection.agent.stop()
def test_agent(self):
connection = bluefang.Bluefang()
connection.agent.start()
adapter_name = 'hci0'
self.dbusmock_bluez.AddAdapter(adapter_name, system_name)
connection.pair(timeout_in_ms=1)
connection.agent.stop()
adapter = dbus.Interface(dbus.SystemBus().get_object("org.bluez", "/org/bluez/hci0"), "org.freedesktop.DBus.Properties")
assert(adapter.Get("org.bluez.Adapter1", "Discoverable") == True)
def test_register_profile_invalid_path(self):
with pytest.raises(ValueError) as e:
connection = bluefang.Bluefang()
connection.register_profile('somepath')
err_msg = "Invalid object path 'somepath': does not start with '/'"
assert err_msg in str(e.value)
|
#!/usr/bin/env python
# coding: utf-8
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import pandas as pd
def plotRollingAvg(df, ax, names, title, logscale=False):
for name in names:
ax.plot(pd.to_datetime(df.loc[name].index),
df.loc[name].diff().rolling(window=7).mean(),
linewidth=2,
label=name)
_, xmax = ax.get_xlim()
ax.set_xlim(xmax - 7*17, xmax)
ax.xaxis.set_minor_locator(mdates.DayLocator(interval=1))
ax.xaxis.set_major_locator(mdates.DayLocator(interval=7))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.setp(ax.get_xticklabels(), rotation=30, horizontalalignment='right')
if logscale:
ax.set_yscale('log')
ax.yaxis.set_major_formatter(mticker.StrMethodFormatter('{x:,.0f}'))
ax.yaxis.set_minor_locator(plt.NullLocator())
ax.legend(loc='best', prop={'size': 12})
if title:
ax.title.set_text(title+', 7-Day Rolling Avg')
ax.grid(color='#d4d4d4')
# Load global time-series so we can compare US vs EU
df_global = pd.read_csv(
('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/'
'csse_covid_19_data/csse_covid_19_time_series/'
'time_series_covid19_confirmed_global.csv'))
df_global = df_global.drop(columns=['Province/State','Lat', 'Long'])
df_global = df_global.groupby('Country/Region').agg('sum')
# Add row for EU totals
eu = ['Austria', 'Belgium', 'Bulgaria', 'Croatia', 'Cyprus', 'Czechia', 'Denmark',
'Estonia', 'Finland', 'France', 'Germany', 'Greece', 'Hungary', 'Ireland',
'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Netherlands', 'Poland',
'Portugal', 'Romania', 'Slovakia', 'Slovenia', 'Spain', 'Sweden']
df_global.loc['EU',:] = df_global.loc[eu].sum()
# Load US data so we can look at the four most populous states
df_us = pd.read_csv(
('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/'
'csse_covid_19_data/csse_covid_19_time_series/'
'time_series_covid19_confirmed_US.csv'))
df_us = df_us.drop(columns=[
'UID', 'iso2', 'iso3', 'code3', 'FIPS',
'Admin2', 'Country_Region','Lat', 'Long_'])
df_us = df_us.groupby('Province_State').agg('sum')
# Add row for US total (not needed or used)
df_us.loc['United States',:] = df_us.sum(axis=0)
countries = ['EU', 'US']
print(df_global.diff(axis=1).loc[countries].iloc[:,-7:])
print('')
states = ['California', 'Texas', 'Florida', 'New York']
print(df_us.diff(axis=1).loc[states].iloc[:,-7:])
pd.plotting.register_matplotlib_converters()
plt.style.use('fivethirtyeight')
fig, (ax1, ax2) = plt.subplots(2,1,figsize=(10, 9))
plotRollingAvg(df_global, ax1, countries, 'Daily New Cases')
plotRollingAvg(df_us, ax2, states, '')
fig.autofmt_xdate()
plt.show()
|
"""
A class representing an agent that could be of several types.
"""
__all__ = ['Agent']
from sepdesign._types import AgentType
import numpy as np
class Agent(object):
"""
A class representing an agent that could be of several types.
:param agent_types: A list of agent type (immutable).
:param type_probabilities: A probability for each type. Positive numbers
that sum to one. If ``None`` then a uniform
probability distribution is assumed (immutable).
"""
def __init__(self, agent_types, type_probabilities=None):
if isinstance(agent_types, AgentType):
agent_types = [agent_types]
assert isinstance(agent_types, list)
for at in agent_types:
assert isinstance(at, AgentType)
self._agent_types = agent_types
if type_probabilities is None:
type_probabilities = np.ones(self.num_types)
assert np.all(type_probabilities >= 0.)
type_probabilities /= np.sum(type_probabilities)
self._type_probabilities = type_probabilities
@property
def num_types(self):
return len(self.agent_types)
@property
def agent_types(self):
"""
Get the agent types.
"""
return self._agent_types
@property
def type_probabilities(self):
"""
Get the probability of each type.
"""
return self._type_probabilities
def __repr__(self):
"""
Return a string representation of the object.
"""
return 'Agent(agent_types=' + str(self.agent_types) +\
', type_probabilities=' + str(self.type_probabilities) + ')'
if __name__ == '__main__':
from sepdesign._quality_functions import *
from sepdesign._cost_functions import *
from sepdesign._utility_functions import *
# Create an agent of a specific type
agent_type = AgentType(LinearQualityFunction(1.5, 0.2),
QuadraticCostFunction(0.1),
ExponentialUtilityFunction(2.0))
agent = Agent(agent_type)
print(str(agent))
# Let's create more types
agent_type2 = AgentType(LinearQualityFunction(2.5, 0.1),
QuadraticCostFunction(0.3),
ExponentialUtilityFunction(1.5))
agent2 = Agent([agent_type, agent_type2])
print(str(agent2))
|
# -*- coding:utf-8 -*-
import urllib.request
import urllib.error
import re
# Helper class for cleaning up page HTML tags
class Tool:
# Strip <img> tags and 7-character runs of spaces
removeImg = re.compile('<img.*?>| {7}|')
# Remove hyperlink tags
removeAddr = re.compile('<a.*?>|</a>')
# Replace line-breaking tags with \n
replaceLine = re.compile('<tr>|<div>|</div>|</p>')
# Replace table cells <td> with \t
replaceTD= re.compile('<td>')
# Replace paragraph openings with \n plus leading spaces
replacePara = re.compile('<p.*?>')
# Replace <br> or <br><br> with \n
replaceBR = re.compile('<br><br>|<br>')
# Strip all remaining tags
removeExtraTag = re.compile('<.*?>')
def replace(self, x):
x = re.sub(self.removeImg, "", x)
x = re.sub(self.removeAddr, "", x)
x = re.sub(self.replaceLine, "\n", x)
x = re.sub(self.replaceTD, "\t", x)
x = re.sub(self.replacePara, "\n ", x)
x = re.sub(self.replaceBR, "\n", x)
x = re.sub(self.removeExtraTag, "", x)
# strip() removes extra leading/trailing whitespace
return x.strip()
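# Example (added for illustration): Tool().replace('<p class="d_post">Hi<br><img src="x.png"></p>')
# strips the tags, turns <br> into a newline and returns 'Hi' after the final strip().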
# Baidu Tieba crawler class
class BDTB:
# Initialize with the thread URL
def __init__(self, baseurl, filename, floorTag):
# Split the page-number suffix off the URL
if "?pn=" in baseurl:
self.baseURL, self.unURL = baseurl.split("?pn=", 1)
else:
self.baseURL = baseurl
self.tool = Tool()
# File object used for writing the output
self.file = None
# Floor (post) counter, starts at 1
self.floor = 1
# Title of the output file
if filename == '':
self.defaultTitle = u"百度贴吧"
else:
self.defaultTitle = u"百度贴吧-" + filename
# Flag indicating whether to write floor separators
self.floorTag = floorTag
# Given a page number, fetch the HTML of that page of the thread
def getPage(self, pageNum):
try:
# Build the URL
URL = self.baseURL + "?pn=" + str(pageNum)
request = urllib.request.Request(URL)
response = urllib.request.urlopen(request)
# Return the content decoded as UTF-8
return response.read().decode('utf-8')
# Report an error if the connection fails
except urllib.error.URLError as e:
if hasattr(e, "reason"):
print(u"连接百度贴吧失败,错误原因", e.reason)
return None
# Get the thread title
def getTitle(self, page):
# Regex to capture the title
pattern = re.compile('<h[1-6] class="core_title_txt.*?>(.*?)</h[1-6]>', re.S)
result = re.search(pattern, str(page))
if result:
# Return the title if it was found
#print(result.group(1))
return result.group(1).strip()
else:
return None
# Get the total number of pages in the thread
def getPageNum(self, page):
# Regex to capture the page count
pattern = re.compile('<li class="l_reply_num.*?</span>.*?<span.*?>(.*?)</span>', re.S)
result = re.search(pattern, str(page))
if result:
return result.group(1).strip()
else:
return None
# Get usernames, user IDs, badges, reply times and post contents
def getUserProfiles(self, page):
pattern1 =re.compile('<li class=\"d_name\" data-field=.*?[^\d]*(\d*)[^\d]*}{1}', re.S)
users_id_list = re.findall(pattern1, str(page))
pattern2 =re.compile('<a data-field=.*?>(.*?)</a>{1}', re.S)
usersname_list = re.findall(pattern2, str(page))
pattern3 = re.compile('本吧头衔(.*?),点击{1}', re.S)
badge_list = re.findall(pattern3, str(page))
pattern7 = re.compile('\d{4}-\d{2}-\d{2} \d{2}:\d{2}', re.S)
retime_list = re.findall(pattern7, str(page))
pattern5 = re.compile('<div id="post_content_.*?>(.*?)</div>', re.S)
detail_list = re.findall(pattern5, str(page))
#print(retime_list)
all_contents = []
un_contents = []
detail_contents = []
produced_list = {}
for username in usersname_list:
un = self.tool.replace(username)
#userprofiles['user_name'] = un
un_contents.append(un)
#print(badge_contents)
for item in detail_list:
# Strip tags from the text and add a newline before and after
content = "\n" + self.tool.replace(item) + "\n"
detail_contents.append(str(content))
# Zip the ID, username and profile lists together and build one dict per post
mid = map(list, zip(users_id_list, un_contents, badge_list, retime_list, detail_contents))
for item in mid:
produced_dict = dict(zip(['user_id: ', 'user_name: ', 'user_badge: ', 'user_reply_time: ', 'tieba_content: '], item))
all_contents.append((produced_dict))
#print(all_contents)
return all_contents
#return produced_list
# Set the title of the output file
def setFileTitle(self, title):
# If the title is not None, i.e. it was fetched successfully
if (title is not None)and(self.defaultTitle == u"百度贴吧"):
self.file = open(u"百度贴吧-" + title + ".txt", "w+", encoding='utf-8')
else:
self.file = open(self.defaultTitle + ".txt", "w+", encoding='utf-8')
# Write the scraped page content to the file
def writeData(self, contents):
# Write the information for each floor (post) to the file
for i in contents:
if self.floorTag == 'Y':
# Separator between floors
floorLine = "\n" + str(self.floor) + u"楼-----------------------------------------------------------------------\n"
self.file.write(floorLine)
for (k, v) in i.items():
self.file.write(''.join((k+v+'\n')))
#print(k,v)
self.floor += 1
def start(self):
indexPage = self.getPage(1)
pageNum = self.getPageNum(indexPage)
title = self.getTitle(indexPage)
self.setFileTitle(title)
#self.getUserProfiles(indexPage)
if pageNum == None:
print("URL已失效,请重试")
return
try:
print("该帖子共有" + str(pageNum) + "页")
for i in range(1, int(pageNum)+1):
print("正在写入第" + str(i) + "页数据")
page_cont = self.getPage(i)
whole = self.getUserProfiles(page_cont)
self.writeData(whole)
# Handle write errors
except IOError as e:
print("写入异常,原因" + e.message)
finally:
self.file.close()
print("写入任务完成")
baseurl = str(input(u"请输入完整的百度贴吧帖子的网址:"))
confname = input("是否为生成的爬虫文件命名?是输入Y,否输入N,由系统自动为其命名\n")
if confname == 'Y':
filename = input("爬虫文件命名为:百度贴吧-")
else:
filename = ''
floorTag = input("是否在文件中标记贴吧楼层信息? 是输入Y,否输入N\n")
bdtb = BDTB(baseurl, filename, floorTag)
bdtb.start()
|
#!/usr/bin/env python
# coding: utf-8
# # 1. **Installing PaddleGAN**
#
# PaddleGAN can currently be installed by cloning either the GitHub or the Gitee repository.
# In[ ]:
# The current working directory is /home/aistudio/, i.e. the directory shown in the file browser on the left
# Clone the latest PaddleGAN repository into the current directory
# !git clone https://github.com/PaddlePaddle/PaddleGAN.git
# GitHub downloads are slow, so clone from Gitee instead:
get_ipython().system('git clone https://gitee.com/paddlepaddle/PaddleGAN.git')
get_ipython().run_line_magic('cd', 'PaddleGAN/')
get_ipython().system('pip install -v -e .')
# # 2. **Models used in PaddleGAN**
# ## 2.1 Frame interpolation model: DAIN
# DAIN stands for Depth-Aware Video Frame Interpolation. The model explores depth information to explicitly detect occlusions.
#
# In this work, the researchers propose a method that detects occlusion by exploiting depth cues.
# 
# The figure above shows the DAIN architecture: given two input frames, the model first estimates optical flow and depth maps, then uses the proposed depth-aware flow projection layer to generate the intermediate flow.
#
# The model then warps the input frames, depth maps and contextual features based on the optical flow and local interpolation kernels, and synthesizes the output frame.
#
# The model is compact, efficient and fully differentiable. Quantitative and qualitative results show that DAIN outperforms state-of-the-art frame interpolation methods on a variety of datasets.
#
# In short, the authors developed a depth-aware flow projection layer to synthesize the intermediate flow, which controls how objects at different depths are sampled; in addition, hierarchical features are learned to gather contextual information from neighbouring pixels.
#
# [1] Paper: [https://arxiv.org/pdf/1904.00830.pdf](http://)
#
# *"Depth-Aware Video Frame Interpolation"*
#
# [2] Project: [https://github.com/baowenbo/DAIN*](http://)
#
# 
#
# ```
# ppgan.apps.DAINPredictor(
# output_path='output',
# weight_path=None,
# time_step=None,
# use_gpu=True,
# remove_duplicates=False)
# ```
# #### Parameters
#
# - `output_path (str, optional)`: output folder path. Default: `output`.
# - `weight_path (None, optional)`: path of the weights to load; if not set, the default weights are downloaded from the cloud. Default: `None`.
# - `time_step (int)`: interpolation time step; with a value of 0.5, a video originally at 30 fps becomes 60 fps after interpolation.
# - `remove_duplicates (bool, optional)`: whether to remove duplicate frames. Default: `False`.
#
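# A minimal usage sketch (added; not part of the original notebook). It assumes the
# `ppgan.apps` import path documented above and a local video file; the `run()` call
# is an assumption about the predictor's entry point.
#
# ```
# from ppgan.apps import DAINPredictor
# dain = DAINPredictor(output_path='output', time_step=0.5)
# dain.run('/home/aistudio/Peking_5s.mp4')
# ```
#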
# ## 2.2 Colorization model: DeOldifyPredictor
# DeOldify uses a self-attention generative adversarial network whose generator is a U-Net style network. It performs well at image colorization.
#
# DeOldify is trained with a new GAN training method called NoGAN, designed to address the main problems that arise when training a normal adversarial architecture consisting of one discriminator and one generator. Typically, GAN training optimizes the discriminator and generator together; the generator starts out completely random and over time learns to fool the discriminator, which tries to tell generated images from real ones. NoGAN provides the same benefits as ordinary GAN training while spending far less time on training the GAN architecture itself (which is usually very compute-intensive). Instead, the generator is pre-trained with a conventional loss function so that it becomes stronger, faster and more reliable; most of the training time is spent pre-training the generator and the discriminator separately with more direct, fast and reliable conventional methods. **The key insight is that these more "conventional" methods usually give you most of the result you need, and the GAN can then be used to close the remaining gap in realism.**
#
# The steps are as follows:
#
# *Step 1. Train the generator in the conventional way using only a feature loss.*
#
# *Step 2. Next, generate images from it and train the discriminator as a basic binary classifier to distinguish these outputs from real images.*
#
# *Step 3. Finally, train the generator and the discriminator together in a GAN setting.*
#
# [1] Paper: not published yet
#
# [2] Project: [https://github.com/jantic/DeOldify](http://)
#
# 
#
# ```
# ppgan.apps.DeOldifyPredictor(output='output', weight_path=None, render_factor=32)
# ```
# #### Parameters
#
# - `output_path (str, optional)`: output folder path. Default: `output`.
# - `weight_path (None, optional)`: path of the weights to load; if not set, the default weights are downloaded from the cloud. Default: `None`.
# - `render_factor (int)`: this value is multiplied by 16 and used as the resize target for the input frames; if set to 32,
# the input frames are resized to (32 * 16, 32 * 16) before being fed to the network.
#
# ## 2.3 Colorization model: DeepRemasterPredictor
# DeepRemaster is based on spatio-temporal convolutional networks and a self-attention mechanism, and can colorize a video using any number of reference frames provided as input.
# 
#
# ```
# ppgan.apps.DeepRemasterPredictor(
# output='output',
# weight_path=None,
# colorization=False,
# reference_dir=None,
# mindim=360):
# ```
# #### Parameters
#
# - `output_path (str, optional)`: output folder path. Default: `output`.
# - `weight_path (None, optional)`: path of the weights to load; if not set, the default weights are downloaded from the cloud. Default: `None`.
# - `colorization (bool)`: whether to colorize the input video; if set to `True`, the reference-frame folder must also be provided. Default: `False`.
# - `reference_dir (bool)`: folder containing the reference frames. Default: `None`.
# - `mindim (bool)`: size of the shorter edge after the input frames are resized. Default: 360.
#
# ## 2.4 Super-resolution model: RealSRPredictor
# RealSR designs a novel realistic degradation framework for real-world images by estimating a variety of blur kernels and the actual noise distribution. Based on this degradation framework, low-resolution images that share the same domain as real-world images can be obtained, and a real-world super-resolution model aimed at better perceptual quality is proposed. Extensive experiments on synthetic noisy data and real-world images show that the model effectively suppresses noise and improves visual quality.
#
# > Winner of both tracks of the CVPR-NTIRE-2020 real-world image super-resolution challenge by a clear margin.
#
# **Key design innovations**: compared with existing super-resolution methods, RealSR's contributions are threefold:
#
# 1. RealSR uses a newly designed image degradation method that simulates the degradation of real images by analysing the blur and noise found in them.
#
# 2. No paired training data is required; the model can be trained with unlabelled data.
#
# 3. It can handle the blur and noise present in low-resolution images and produce sharper, cleaner high-resolution results.
#
# [1] Paper: [https://arxiv.org/pdf/1904.00523.pdf](http://)
#
# *"Toward Real-World Single Image Super-Resolution: A New Benchmark and A New Model"*
#
# [2] Project: [https://github.com/Tencent/Real-SR](http://)
#
# 
#
# ```
# ppgan.apps.RealSRPredictor(output='output', weight_path=None)
# ```
# #### Parameters
#
# - `output_path (str, optional)`: output folder path. Default: `output`.
# - `weight_path (None, optional)`: path of the weights to load; if not set, the default weights are downloaded from the cloud. Default: `None`.
#
# ## 2.5 Super-resolution model: EDVRPredictor
# EDVR proposes a novel video restoration framework with enhanced deformable convolutions: first, a Pyramid, Cascading and Deformable (PCD) alignment module designed to handle large motions, which uses deformable convolutions to perform alignment at the feature level in a coarse-to-fine manner; second, a Temporal-Spatial Attention (TSA) fusion module that applies attention both temporally and spatially to strengthen restoration.
#
# > In the CVPR 2019 Workshop NTIRE 2019 video restoration challenge, the joint team from SenseTime, CUHK, NTU and SIAT used EDVR to win first place in all four tracks!
#
# **Key design innovations**:
#
# 1. Image alignment.
#
# Adjacent video frames exhibit some jitter and must be aligned before further processing and fusion. This was traditionally handled with optical-flow algorithms, but here the authors introduce a new PCD alignment module that uses deformable convolutions to align frames, and the whole pipeline can be trained end to end.
#
# 2. Temporal-spatial information fusion.
#
# Fusion exploits information in the temporal domain (neighbouring frames) and the spatial domain (within a frame). The authors introduce a temporal-spatial attention model to perform this fusion.
#
# EDVR architecture:
# 
#
# The PCD alignment module is built from deformable convolutions cascaded in a pyramid structure, as shown in the figure:
#
# The temporal-spatial attention fusion model (TSA) is shown in the figure:
#
#
# [1] Paper: [https://arxiv.org/pdf/1905.02716.pdf](http://)
#
# *"EDVR: Video Restoration with Enhanced Deformable Convolutional Networks"*
#
# [2] Project: [https://github.com/xinntao/EDVR](http://)
#
# 
#
# ```
# ppgan.apps.EDVRPredictor(output='output', weight_path=None)
# ```
# #### Parameters
#
# - `output_path (str, optional)`: output folder path. Default: `output`.
# - `weight_path (None, optional)`: path of the weights to load; if not set, the default weights are downloaded from the cloud. Default: `None`.
# # **3. Restoring a video with PaddleGAN**
# ## 3.1 Import the packages needed for visualization
# In[ ]:
import cv2
import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
import warnings
warnings.filterwarnings("ignore")
import paddle
print("本项目Paddle版本号:"+ paddle.__version__)
# ## 3.2 Define a helper function to display videos
# In[ ]:
# Define a helper function to display videos
def display(driving, fps, size=(8, 6)):
fig = plt.figure(figsize=size)
ims = []
for i in range(len(driving)):
cols = []
cols.append(driving[i])
im = plt.imshow(np.concatenate(cols, axis=1), animated=True)
plt.axis('off')
ims.append([im])
video = animation.ArtistAnimation(fig, ims, interval=1000.0/fps, repeat_delay=1000)
plt.close()
return video
# ## 3.3 Show the original video to be processed
# In[ ]:
video_path = '/home/aistudio/Peking_5s.mp4' # path of the video to process
video_frames = imageio.mimread(video_path, memtest=False)
cap = cv2.VideoCapture(video_path) # open the video file
fps = cap.get(cv2.CAP_PROP_FPS) # get the original frame rate of the video
HTML(display(video_frames, fps).to_html5_video()) # show the original black-and-white video to be processed as an HTML5 video
# ## 3.4 Run the models: the video restoration process
# In[23]:
# Restore the video with the frame-interpolation (DAIN), colorization (DeOldify) and super-resolution (EDVR, RealSR) models
"""
The input argument is the path of the input video
process_order is the models to use and the order to apply them in (currently supported models shown below)
output is the folder where the processed video is stored
"""
get_ipython().run_line_magic('cd', '/home/aistudio/PaddleGAN/applications/')
get_ipython().system('python tools/video-enhance.py --input /home/aistudio/Peking_5s.mp4 --process_order DAIN DeOldify EDVR --output output_dir')
# ## 3.5 Show the processed video
# In[24]:
# The path of the processed video is below. Note: if the video is large, this can take a long time and may fail; it is better to download it and watch locally.
output_video_path = '/home/aistudio/PaddleGAN/applications/output_dir/EDVR/Peking_5s_deoldify_out_edvr_out.mp4'
# Loading a very long video can exhaust memory; the processed 19-second video can be shown in the page instead
# output_video_path = '/home/aistudio/moderntimes_output19.mp4'
video_frames = imageio.mimread(output_video_path, memtest=False)
cap = cv2.VideoCapture(output_video_path) # open the processed video file
fps = cap.get(cv2.CAP_PROP_FPS) # get the frame rate of the video
HTML(display(video_frames, fps).to_html5_video()) # show the processed video
# ## 3.6 Audio handling
# In[ ]:
# Full version of Peking_5s.mp4 with the audio added; download it and play it locally
# The steps above ignore the video's audio track; this code is for adding the audio back
video_frames = imageio.mimread(output_video_path2, memtest=False)
cap = cv2.VideoCapture(output_video_path2)
fps = cap.get(cv2.CAP_PROP_FPS) # get the frame rate of the video
HTML(display(video_frames, fps).to_html5_video())
|
#!/usr/bin/python
"""
path_stat.py - Functions from os.path that import 'stat'.
We want to keep bin/osh_parse free of I/O. It's a pure stdin/stdout filter.
"""
import posix
import stat
def exists(path):
"""Test whether a path exists. Returns False for broken symbolic links"""
try:
posix.stat(path)
except posix.error:
return False
return True
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = posix.stat(s)
except posix.error:
return False
return stat.S_ISDIR(st.st_mode)
|
""" Cisco_IOS_XR_sdr_invmgr_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR sdr\-invmgr package operational data.
This module contains definitions
for the following management objects\:
sdr\-inventory\: SDR information
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class SdrInventory(object):
"""
SDR information
.. attribute:: racks
RackTable
**type**\: :py:class:`Racks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sdr_invmgr_oper.SdrInventory.Racks>`
"""
_prefix = 'sdr-invmgr-oper'
_revision = '2015-11-09'
def __init__(self):
self.racks = SdrInventory.Racks()
self.racks.parent = self
class Racks(object):
"""
RackTable
.. attribute:: rack
Rack name
**type**\: list of :py:class:`Rack <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sdr_invmgr_oper.SdrInventory.Racks.Rack>`
"""
_prefix = 'sdr-invmgr-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.rack = YList()
self.rack.parent = self
self.rack.name = 'rack'
class Rack(object):
"""
Rack name
.. attribute:: name <key>
Rack name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: slot
Slot name
**type**\: list of :py:class:`Slot <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sdr_invmgr_oper.SdrInventory.Racks.Rack.Slot>`
"""
_prefix = 'sdr-invmgr-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.name = None
self.slot = YList()
self.slot.parent = self
self.slot.name = 'slot'
class Slot(object):
"""
Slot name
.. attribute:: name <key>
Slot name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: card
Card
**type**\: list of :py:class:`Card <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sdr_invmgr_oper.SdrInventory.Racks.Rack.Slot.Card>`
"""
_prefix = 'sdr-invmgr-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.name = None
self.card = YList()
self.card.parent = self
self.card.name = 'card'
class Card(object):
"""
Card
.. attribute:: name <key>
Card
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: attributes
Attributes
**type**\: :py:class:`Attributes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sdr_invmgr_oper.SdrInventory.Racks.Rack.Slot.Card.Attributes>`
"""
_prefix = 'sdr-invmgr-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.name = None
self.attributes = SdrInventory.Racks.Rack.Slot.Card.Attributes()
self.attributes.parent = self
class Attributes(object):
"""
Attributes
.. attribute:: card_admin_state
Card Admin State
**type**\: int
**range:** \-2147483648..2147483647
**default value**\: 0
.. attribute:: card_state
CardState
**type**\: int
**range:** \-2147483648..2147483647
**default value**\: 0
.. attribute:: card_type
CardType
**type**\: int
**range:** \-2147483648..2147483647
**default value**\: 0
.. attribute:: config_state
ConfigState
**type**\: int
**range:** \-2147483648..2147483647
**default value**\: 0
.. attribute:: ctype
CType
**type**\: int
**range:** \-2147483648..2147483647
**default value**\: 0
.. attribute:: monitor
Monitor
**type**\: int
**range:** \-2147483648..2147483647
**default value**\: 0
.. attribute:: pi_slot_number
Pi Slot Number
**type**\: int
**range:** \-2147483648..2147483647
**default value**\: 0
.. attribute:: power
Power
**type**\: int
**range:** \-2147483648..2147483647
**default value**\: 0
.. attribute:: shutdown
Shutdown
**type**\: int
**range:** \-2147483648..2147483647
**default value**\: 0
.. attribute:: vm_state
VM State information
**type**\: int
**range:** \-2147483648..2147483647
**default value**\: 0
"""
_prefix = 'sdr-invmgr-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.card_admin_state = None
self.card_state = None
self.card_type = None
self.config_state = None
self.ctype = None
self.monitor = None
self.pi_slot_number = None
self.power = None
self.shutdown = None
self.vm_state = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-sdr-invmgr-oper:attributes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.card_admin_state is not None:
return True
if self.card_state is not None:
return True
if self.card_type is not None:
return True
if self.config_state is not None:
return True
if self.ctype is not None:
return True
if self.monitor is not None:
return True
if self.pi_slot_number is not None:
return True
if self.power is not None:
return True
if self.shutdown is not None:
return True
if self.vm_state is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_sdr_invmgr_oper as meta
return meta._meta_table['SdrInventory.Racks.Rack.Slot.Card.Attributes']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.name is None:
raise YPYModelError('Key property name is None')
return self.parent._common_path +'/Cisco-IOS-XR-sdr-invmgr-oper:card[Cisco-IOS-XR-sdr-invmgr-oper:name = ' + str(self.name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.attributes is not None and self.attributes._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_sdr_invmgr_oper as meta
return meta._meta_table['SdrInventory.Racks.Rack.Slot.Card']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.name is None:
raise YPYModelError('Key property name is None')
return self.parent._common_path +'/Cisco-IOS-XR-sdr-invmgr-oper:slot[Cisco-IOS-XR-sdr-invmgr-oper:name = ' + str(self.name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.card is not None:
for child_ref in self.card:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_sdr_invmgr_oper as meta
return meta._meta_table['SdrInventory.Racks.Rack.Slot']['meta_info']
@property
def _common_path(self):
if self.name is None:
raise YPYModelError('Key property name is None')
return '/Cisco-IOS-XR-sdr-invmgr-oper:sdr-inventory/Cisco-IOS-XR-sdr-invmgr-oper:racks/Cisco-IOS-XR-sdr-invmgr-oper:rack[Cisco-IOS-XR-sdr-invmgr-oper:name = ' + str(self.name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.slot is not None:
for child_ref in self.slot:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_sdr_invmgr_oper as meta
return meta._meta_table['SdrInventory.Racks.Rack']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-sdr-invmgr-oper:sdr-inventory/Cisco-IOS-XR-sdr-invmgr-oper:racks'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.rack is not None:
for child_ref in self.rack:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_sdr_invmgr_oper as meta
return meta._meta_table['SdrInventory.Racks']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-sdr-invmgr-oper:sdr-inventory'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.racks is not None and self.racks._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_sdr_invmgr_oper as meta
return meta._meta_table['SdrInventory']['meta_info']
|
import io
import multiprocessing as mp
import sys
import time
import pytest
from omnibus import Sender, Receiver, Message, server
from omnibus.omnibus import OmnibusCommunicator
class TestOmnibus:
@pytest.fixture(autouse=True, scope="class")
def server(self):
# start server
ctx = mp.get_context('spawn') # threadsafe multiprocess method
p = ctx.Process(target=server.server)
p.start()
OmnibusCommunicator.server_ip = "127.0.0.1" # skip discovery
# wait until the server is alive
s = Sender()
r = Receiver("_ALIVE")
while r.recv(1) is None:
s.send("_ALIVE", "_ALIVE")
yield
# stop the server
p.terminate()
p.join()
@pytest.fixture()
def sender(self):
return Sender # for consistency with receiver
@pytest.fixture()
def receiver(self):
def _receiver(*channels):
r = Receiver(*channels)
time.sleep(0.05) # let the receiver connect to the server so messages aren't dropped
return r
return _receiver
def test_nominal(self, sender, receiver):
s = sender()
r = receiver("CHAN")
s.send("CHAN", "A")
assert r.recv(10) == "A"
def test_channels(self, sender, receiver):
s1 = sender()
r1 = receiver("CHAN1")
s2 = sender()
r2 = receiver("CHAN2")
r3 = receiver("CHAN")
s1.send("CHAN1", "A")
assert r1.recv(10) == "A"
assert r3.recv(10) == "A"
assert r2.recv(10) is None
s2.send("CHAN2", "B")
assert r2.recv(10) == "B"
assert r3.recv(10) == "B"
assert r1.recv(10) is None
def test_msg_objects(self, sender, receiver):
s = sender()
r = receiver("CHAN")
s.send_message(Message("CHAN", 10, "PAYLOAD"))
m = r.recv_message(10)
assert m.channel == "CHAN"
assert m.timestamp == 10
assert m.payload == "PAYLOAD"
def test_multi_channel_receiving(self, sender, receiver):
s = sender()
r = receiver("CHAN1", "CHAN2", "CHAN3")
s.send("CHAN1", "A")
assert r.recv(10) == "A"
s.send("CHAN2", "B")
assert r.recv(10) == "B"
s.send("CHAN3", "C")
assert r.recv(10) == "C"
class TestIPBroadcast:
@pytest.fixture()
def broadcaster(self):
ctx = mp.get_context('spawn')
p = ctx.Process(target=server.ip_broadcast)
p.start()
yield
p.terminate()
p.join()
def test_broadcast(self, broadcaster, monkeypatch):
# respond to the IP prompt if discovery times out
monkeypatch.setattr(sys, "stdin", io.StringIO("timeout"))
# make sure the server_ip isn't stored from previous tests
OmnibusCommunicator.server_ip = None
c = OmnibusCommunicator()
assert c.server_ip == server.get_ip()
def test_timeout(self, monkeypatch):
# respond to the IP prompt if discovery times out
monkeypatch.setattr(sys, "stdin", io.StringIO("timeout"))
# make sure the server_ip isn't stored from previous tests
OmnibusCommunicator.server_ip = None
c = OmnibusCommunicator()
assert c.server_ip == "timeout"
|
class ResultManager:
"""
1. definition
2. table
"""
def get_view_obj(self,nnid, wf_id):
"""
get view data for net config
:return:
"""
pass
def set_view_obj(self,nnid, wf_id):
"""
set net config data edited on view
:param obj:
:return:
"""
pass
|
import argparse
from msgraphy import GraphApi
import msgraphy_util
def main(name):
api = GraphApi(scopes=["Sites.Read.All"])
response = api.sharepoint.list_sites(search=name)
for site in response.value:
print(site, end="\n\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='List or search for sharepoint sites'
)
parser.add_argument("name", type=str, nargs="?", help="show only sites which contains this name")
args = parser.parse_args()
main(args.name)
|
# -*- Python -*-
# Configuration file for the 'lit' test runner.
import os
import lit.formats
from lit.llvm import llvm_config
# name: The name of this test suite.
config.name = 'mlir-clang'
# testFormat: The test format to use to interpret tests.
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# suffixes: A list of file extensions to treat as test files. This is overridden
# by individual lit.local.cfg files in the test subdirectories.
config.suffixes = ['.c', '.cpp', '.cu']
# excludes: A list of directories or files to exclude from the testsuite even
# if they match the suffixes pattern.
#config.excludes = ['Inputs']
# test_source_root: The root path where tests are located.
config.test_source_root = os.path.dirname(__file__)
# test_exec_root: The root path where tests should be run.
config.test_exec_root = os.path.join(config.mlir_clang_obj_root, 'test')
# Tweak the PATH to include the tools dir.
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
llvm_config.with_environment('PATH', config.polygeist_tools_dir, append_path=True)
llvm_config.with_environment('PATH', config.mlir_clang_obj_root, append_path=True)
import tempfile
lit_tmp = tempfile.mkdtemp(prefix="lit_tmp_")
os.environ.update({
'TMPDIR': lit_tmp,
'TMP': lit_tmp,
'TEMP': lit_tmp,
'TEMPDIR': lit_tmp,
})
# Propagate some variables from the host environment.
llvm_config.with_system_environment(
['HOME', 'INCLUDE', 'LIB', 'TMP', 'TEMP'])
llvm_config.use_default_substitutions()
# For each occurrence of a tool name, replace it with the full path to
# the build directory holding that tool. We explicitly specify the directories
# to search to ensure that we get the tools just built and not some random
# tools that might happen to be in the user's PATH.
tool_dirs = [config.llvm_tools_dir]
tools = [ 'opt', 'clang' ]
llvm_config.add_tool_substitutions(tools, tool_dirs)
tool_dirs = [config.polygeist_tools_dir, config.mlir_clang_obj_root]
tools = [ 'mlir-clang' ]
llvm_config.add_tool_substitutions(tools, tool_dirs)
config.substitutions.append(('%stdinclude', '-I ' + config.clang_header_dir + " -I " + config.test_source_root + "/polybench/utilities"))
config.substitutions.append(('%polyexec', config.test_source_root + '/polybench/utilities/polybench.c -D POLYBENCH_TIME -D POLYBENCH_NO_FLUSH_CACHE -D MINI_DATASET'))
config.substitutions.append(('%polyverify', config.test_source_root + '/polybench/utilities/polybench.c -D POLYBENCH_DUMP_ARRAYS -D POLYBENCH_NO_FLUSH_CACHE -D MINI_DATASET'))
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 12:41:43 2019
@author: abraverm
"""
import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import math as math
import numpy as np
def get_laplace_mtrx(nx,ny,d=1):
dx = d # defusion speed
dy = d
diag_block = np.eye(ny)*(-2/dx**2+-2/dy**2)
diag_block = diag_block + np.diag(np.ones((ny-1))/dy**2,1)
diag_block = diag_block + np.diag(np.ones((ny-1))/dy**2,-1)
Matrix = np.kron(np.eye(nx),diag_block)
Matrix = Matrix + np.diag(np.ones([(nx-1)*(ny)])/dx**2, ny )
Matrix = Matrix + np.diag(np.ones([(nx-1)*(ny)])/dx**2, -(ny ))
# M_inv = np.linalg.inv(Matrix)
return Matrix
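# Illustration (added): for a 2x2 grid with d=1, get_laplace_mtrx(2, 2) returns the
# 4x4 five-point Laplacian with -4 on the diagonal and 1 on every neighbour coupling.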
def get_oponent_filter() :
s2 = math.sqrt(2.0)
s6 = math.sqrt(6.0)
a = 0.2989
b = 0.5870
c = 0.1140
# d = 1/math.sqrt(3.0)
# a = d
# b = d
# c = d
flt = [[1/s2, -1/s2, 0 ],
[1/s6, 1/s6, -2/s6],
[ a , b , c ]
]
return np.transpose(flt)
def opponent_init(shape, dtype='float32', partition_info=None):
return tf.convert_to_tensor ( get_oponent_filter(), dtype =tf.float32, name ='oponent_kerenl' )
def inv_opponent_init(shape, dtype=None,partition_info=None):
kernel = np.linalg.inv(get_oponent_filter())
return tf.convert_to_tensor ( kernel , dtype =tf.float32, name ='inv_oponent_kerenl' )
def gabor_odd_init(shape, dtype=None,partition_info=None):
kernel = np.zeros([1,2,3,3])
kernel[:,:,0,0] = [[-1.0,1.0]]
kernel[:,:,1,1] = [[-1.0,1.0]]
kernel[:,:,2,2] = [[-1.0,1.0]]
return tf.convert_to_tensor ( kernel, dtype='float32' )
def gabor_even_init(shape, dtype=None,partition_info=None):
kernel = np.zeros([1,3,3,3])
kernel[:,:,0,0] = [[-1.0,2.0,-1.0]]
kernel[:,:,1,1] = [[-1.0,2.0,-1.0]]
kernel[:,:,2,2] = [[-1.0,2.0,-1.0]]
return tf.convert_to_tensor ( kernel, dtype='float32')
class WCWeights(tf.keras.layers.Layer):
def __init__(self ):
super(WCWeights, self).__init__()
self.gabor_even = layers.Conv2D(3, (1,3), padding='same', activation='tanh',
kernel_initializer=gabor_even_init, name=f'gabor_even')
def call(self, inputs, prmd_levels ):
return self.get_wc_waits(inputs, prmd_levels)
def wc_waits_layer(self, inputs, depth):
factor = 2**depth
shape = inputs.shape.as_list()
prmd_l = layers.AvgPool2D(pool_size=factor,name=f'pyramid_avg_pool_{depth}')(inputs)
prmd_l = self.gabor_even(prmd_l)
# prmd_l = layers.Conv2D(3, (1,3), padding='same', activation='tanh',
# kernel_initializer=gabor_even_init, name=f'prmd_w_even_gabor_{depth}')(prmd_l)
prmd_l = layers.UpSampling2D( size=factor, interpolation='nearest',name=f'prmd_upsampl_{depth}')(prmd_l)
if shape != prmd_l.shape.as_list() :
pad_h = shape[1] - prmd_l.shape.as_list()[1]
h_pad_h = int(pad_h/2)
pad_w = shape[2] - prmd_l.shape.as_list()[2]
h_pad_w = int(pad_w/2)
padding = ((h_pad_h, pad_h - h_pad_h) , (h_pad_w, pad_w - h_pad_w) )
prmd_l = layers.ZeroPadding2D(padding = padding , name=f'prmd_zeropadd_{depth}')(prmd_l)
prmd_l = tf.abs(prmd_l)
w = tf.stack([inputs,prmd_l], axis=4)
#w = tf.abs(tf.stack([inputs,prmd_l], axis=4))
#w = tf.reduce_max(w,axis=4)
w = tf.math.reduce_sum(w,axis=(4))
#w = tf.abs(w)
return w
def get_wc_waits(self, inputs, prmd_levels):
w = inputs
for idx in range(1,prmd_levels):
w = self.wc_waits_layer(w, idx)
w = tf.math.reduce_sum(w,axis=(3))
w = tf.stack([w,w,w],axis=3)
w = tf.abs(w)
return w
def dst(x,axis=-1):
"""Discrete Sine Transform (DST-I)
Implemented using 2(N+1)-point FFT
xsym = r_[0,x,0,-x[::-1]]
DST = (-imag(fft(xsym))/2)[1:(N+1)]
adjusted to work over an arbitrary axis for entire n-dim array
"""
m,n = x.shape
pad = np.zeros((m,1))
pad = tf.convert_to_tensor(pad, dtype=tf.float32)
xsym = tf.concat([pad,x,pad,tf.reverse(-x,[1])], axis=1)
xsym = tf.complex(xsym, np.zeros(xsym.shape,dtype='float32'))
DST = tf.signal.fft(xsym)
return (-tf.math.imag(DST)/2)[:, 1:n+1]
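# Added self-check for dst() above (a sketch, assuming eager TensorFlow execution):
# it compares the FFT-based transform with a direct evaluation of the DST-I sum
# described in the docstring. Not called at import time; run _dst_sanity_check() manually.
def _dst_sanity_check(m=4, n=5):
    x_np = np.random.rand(m, n).astype('float32')
    ref = np.zeros((m, n), dtype='float32')
    for k in range(n):
        # DST-I along axis 1: ref[:, k] = sum_j x[:, j] * sin(pi*(j+1)*(k+1)/(n+1))
        ref[:, k] = np.sum(x_np * np.sin(np.pi * np.arange(1, n + 1) * (k + 1) / (n + 1)), axis=1)
    ours = dst(tf.convert_to_tensor(x_np)).numpy()
    return np.allclose(ours, ref, atol=1e-3)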
#def dct2(x):
# return dst2(x)
def dct2(x):
r = dst( tf.transpose(x))
return dst( tf.transpose(r))
def dct2_orgn(x):
ry = tf.signal.dct(x,type=1)
r = tf.signal.dct( tf.transpose(ry), type=1)
return tf.transpose(r)
def idct2(x):
return dct2(x)
def get_lam_mat(m,n,h=0.7):
pi=np.pi
lam_n = np.arange(1,n+1)
lam_n = -4/h**2 * (np.sin((lam_n*pi) / (2*(n - 1))))**2
lam_m = np.arange(1,m+1)
lam_m = -4/h**2 * (np.sin((lam_m*pi) / (2*(m -1))))**2  # compute lambda_x
lam_mat_np = np.ones([m,n])
for i in np.arange(0,m):
for j in np.arange(0,n):
lam_mat_np[i,j] = (lam_mat_np[i,j]) / (lam_m[i] + lam_n[j])
#return tf.convert_to_tensor(lam_mat_np, dtype=tf.float32)
return lam_mat_np
def fft_poisson_2d(f, lam_mat ):
m,n = lam_mat.shape
f_bar = idct2(f) # f_bar= fourier transform of f
normlz = (2.0/n + 1.0)*(2.0/m + 1.0)
f_bar = f_bar * normlz #Normalize
u_bar = layers.Multiply()([f_bar,lam_mat])
u = dct2(u_bar) #sine transform back
#normlz = 2.0/((n-1.0)*(m-1.0))
normlz = 2.0**2/((n+1.0)*(m+1.0))
u = u * normlz #normalize
return u
def poisson_3d (f, lam_mat ):
f_t = tf.transpose(f,[2,0,1])
fixed_poisson_2d = lambda x : fft_poisson_2d(x, lam_mat)
res_t = tf.map_fn(fixed_poisson_2d,f_t,parallel_iterations=True)
return tf.transpose(res_t,[1,2,0])
class PoisonSolver3D(tf.keras.layers.Layer):
def __init__(self,in_shape, h=0.7): # **kwargs):
#kwargs['autocast']=False
super(PoisonSolver3D, self).__init__() #**kwargs)
#self.h = tf.Variable(h,name='defusion_speed', trainable=True)
_,w,h,c = in_shape
self.lamda_mat = tf.constant(get_lam_mat(w,h))
def get_config(self):
super(PoisonSolver3D, self).get_config()
def call(self, inp):
#_,w,h,c = inp.shape
#self.lamda_mat=tf.constant(get_lam_mat(w,h))
fixed_poisson = tf.function(lambda x : poisson_3d(x, self.lamda_mat) )
return tf.map_fn(fixed_poisson,inp)
class PoisonSolverLplMat(tf.keras.layers.Layer):
def __init__(self,in_shape,d=1.0): # **kwargs):
super(PoisonSolverLplMat, self).__init__() #**kwargs)
_,w,h,c = in_shape
lpl_mat = tf.constant(get_laplace_mtrx(w,h,d=d), dtype='float32')
print("lpl shape is" , lpl_mat.shape)
self.inv_lpl_mat = tf.constant(tf.linalg.inv(lpl_mat))
print("lpl inv shape is" ,self.inv_lpl_mat.shape)
def get_config(self):
super(PoisonSolverLplMat, self).get_config()
def laplace_inv(self,x):
w,h,c = x.shape
r = tf.reshape(x, [-1, c])
print("r shape is" ,r.shape)
print("lpl shape is" ,self.inv_lpl_mat.shape)
r = tf.matmul(self.inv_lpl_mat, r)
return tf.reshape(r, x.shape)
def call(self, inp):
return tf.map_fn(self.laplace_inv,inp)
class ApplyWeights(tf.keras.layers.Layer):
def __init__(self,alpha=0.6, beta =0.3 ):
super(ApplyWeights, self).__init__()
self.alpha = tf.Variable(alpha,name='alpha', trainable=True)
self.beta = tf.Variable(beta, name='beta', trainable=True)
def call(self, im_d, w ):
im_d_w = tf.math.multiply(im_d,w)
return self.alpha*im_d + self.beta*im_d_w
def get_wc_model (in_shape,
pad=3,
alpha = 0.8,
beta = 0.4,
pyramid_depth = 3,
defusion_speed = 0.6,
reuse=None,
is_training=True,
scope='wc_model',
lpl_mat = None,
activation ='tanh',
trainable=True,
):
inputs = layers.Input(shape=in_shape)
in_opponent = layers.Dense(3, use_bias=False, activation=activation,
kernel_initializer=opponent_init, dtype='float32', name="to_opponent") (inputs)
#show_tensor_img(in_opponent[0],"opponent")
f = layers.ZeroPadding2D(padding = pad )(in_opponent)
gabor_odd = layers.Conv2D(3,(1,2), padding='same', activation=activation,
kernel_initializer=gabor_odd_init, dtype='float32', name='gabor_odd')
im_dx = gabor_odd(f)
f_t = tf.transpose(f, [0,2,1,3])
im_dy = gabor_odd(f_t)
wc_weights = WCWeights()
#ww = layers.ZeroPadding2D(padding = pad )(in_opponent)
#wy = wc_weights(tf.transpose(ww, [0,2,1,3]), pyramid_depth )
wx = wc_weights(f, pyramid_depth )
wy = wc_weights(f_t, pyramid_depth )
# wx = layers.ZeroPadding2D(padding = pad )(wx)
# wy = layers.ZeroPadding2D(padding = pad )(wy)
#show_tensor_img(wx[0],"wx")
#show_tensor_img(wy[0],"wy")
apply_weights = ApplyWeights(alpha=alpha, beta=beta)
trig_xx = apply_weights(im_dx, wx)
trig_yy = apply_weights(im_dy, wy)
trig_xx = gabor_odd(trig_xx)
trig_yy = gabor_odd(trig_yy)
trig_yy = tf.transpose(trig_yy,[0,2,1,3])
div_trig = layers.add([trig_xx , trig_yy])
#show_tensor_img(div_trig[0],"div_trig")
if lpl_mat is None:
res=PoisonSolver3D(in_shape=div_trig.shape, h=defusion_speed)(div_trig)
else:
res = PoisonSolverLplMat(in_shape=div_trig.shape, d=defusion_speed)(div_trig)
#show_tensor_img(res[0],"result img")
res= layers.Cropping2D(cropping=pad)(res)
model = tf.keras.Model(inputs=inputs, outputs=res, name='WC_model')
if trainable == False:
for layer in model.layers:
layer.trainable=False
return model
def show_tensor_img(tf_img, title = 'no title'):
my_show_image(tf_img,title)
def my_show_image(img,title):
plt.figure(figsize=(10,10))
plt.imshow(img)
plt.title(title)
plt.show()
def decode_img(img,im_w=64,im_h=64):
# convert the compressed string to a 3D uint8 tensor
img = tf.image.decode_jpeg(img, channels=3)
# Use `convert_image_dtype` to convert to floats in the [0,1] range.
img = tf.image.convert_image_dtype(img, tf.float32)
# resize the image to the desired size.
#img = tf.image.resize(img, [im_w, im_w])
img = tf.image.resize_with_pad(img, im_h, im_w)
return img
if __name__ == '__main__':
from enetcfg import EnetCfg
flags = EnetCfg()
#flags.DEFINE_string('wc_activatin', None, "None - use no activation, <actv> - use <actv> in water collor midle layers, for example 'tanh/relu'" )
flags.default_enet_cfg()
cfg = flags.parse_args()
#cfg = flags.parse_args(['--wc_lpl_mat','1'])
#with tf.device('/CPU:0'):
im_w = 256 #64
im_h = 256 #64
#im_w = 128
#im_h = 128
im_c = 3
pad = 8
img_file = '../gray_square.png'
img_file = '../stars_clean.png'
#img_file = '../wc_ddrm.png'
img = tf.io.read_file(img_file)
#list_ds = tf.data.Dataset.list_files(img_file) #str(data_dir/'*/*')
#for f in list_ds.take(1):
# print(f.numpy())
#my_img_rgb = tf.image.decode_image('gray_square.png', dtype=tf.float32)
my_img_rgb = decode_img(img, im_w,im_h)
my_show_image(my_img_rgb, "source img")
#im1 = tf.image.resize_with_pad(my_img_rgb, im_h, im_w)
im1 = tf.reshape(my_img_rgb,[1,im_h,im_w,im_c], name="input_reshaped")
show_tensor_img(im1[0],"Source image tensot[0]")
#cfg.wc_lpl_mat = 1
#cfg.wc_activation = None
k = 1.0
alpha = 0.9 * k
beta = 0.2 * k
pyramid_depth = 4
defusion_speed = 1.0
model = get_wc_model( in_shape=[im_w,im_h,im_c], pad=pad,alpha=alpha, beta=beta, pyramid_depth=pyramid_depth,
defusion_speed=defusion_speed, lpl_mat=cfg.wc_lpl_mat, activation=cfg.wc_activation )
res = model(im1)
res_rgb = layers.Dense(3, input_shape=(3,), use_bias=False, kernel_initializer=inv_opponent_init, dtype='float32')(res)
#res_rgb = tf.keras.activations.sigmoid(res_rgb)
#res_rgb = tf.keras.activations.tanh(res_rgb)
#res_rgb = tf.keras.activations.relu(res_rgb)
res = tf.reshape(res,[res.shape[1],res.shape[2],im_c])
show_tensor_img(res,"result image")
show_tensor_img(res[:,:,0],"result image 0")
show_tensor_img(res[:,:,1],"result image 1")
show_tensor_img(res[:,:,2],"result image 2")
res_rgb = tf.reshape(res_rgb,[res_rgb.shape[1],res_rgb.shape[2],im_c])
show_tensor_img(res_rgb,"result RGB image")
my_show_image(my_img_rgb, "source img")
# loss='categorical_crossentropy',
# optimizer='adadelta'
# model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy', 'mean_squared_error'])
#
# tf.keras.utils.plot_model(model, 'WC_MODEL_with_shape_info.png', show_shapes=True)
#
# print(model.summary())
# for v in model.variables:
# print(v.name, v.numpy())
|
#!/usr/bin/python
# Perceptron learning
import csv
import numpy as np
import random
X = []
Y = []
# read in feature data
with open('admission.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
X.append(np.array([float(f) for f in row[:-1]]))
Y.append(float(row[-1]))
# partition data in train ({X,Y}tr) and test ({X,Y}te)
nte = 10
Xtr = X[:-nte]
Ytr = Y[:-nte]
Xte = X[-nte:]
Yte = Y[-nte:]
def pred(a0, a, x):
return a0 + np.dot(a, x)
def loss(a0, a, xs, ys):
l = 0
for x, y in zip(xs, ys):
p = pred(a0, a, x)
if y * p < 0:
l -= (y * p)
return l
# learning rate
lr = 1
# init alpha
alpha0 = 0
alpha = np.zeros(len(Xtr[0]))
cur_loss = loss(alpha0, alpha, Xtr, Ytr)
iters = 1000
while iters > 0:
i = random.choice(range(len(Xtr)))
x = Xtr[i]
y = Ytr[i]
# predict
yhat = pred(alpha0, alpha, x)
print x, y, yhat
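    # Note (descriptive only): the branch below is the classic perceptron update.
    # On a misclassified sample (y * yhat <= 0) the boundary is nudged toward it:
    # alpha0 += lr * y and alpha += lr * y * x.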
if y * yhat <= 0:
alpha0 = alpha0 + lr * y
alpha = alpha + lr * y * x
print alpha0, alpha
loss_tr, loss_te = loss(alpha0, alpha, Xtr, Ytr), loss(alpha0, alpha, Xte, Yte)
print loss_tr, loss_te
iters -= 1
print alpha0, alpha
|
from keras import models
from keras import layers
# causal conv
def __causal_gated_conv1D(x=None, filters=16, length=6, strides=1):
def causal_gated_conv1D(x, filters, length, strides):
x_in_1 = layers.Conv1D(filters=filters // 2,
kernel_size=length,
dilation_rate=strides, # it's correct, use this instead strides for shape matching
strides=1,
padding="causal")(x)
x_sigmoid = layers.Activation(activation="sigmoid")(x_in_1)
x_in_2 = layers.Conv1D(filters=filters // 2,
kernel_size=length,
dilation_rate=strides, # it's correct, use this instead strides for shape matching
strides=1,
padding="causal")(x)
x_tanh = layers.Activation(activation="tanh")(x_in_2)
x_out = layers.Multiply()([x_sigmoid, x_tanh])
return x_out
if x is None:
return lambda _x: causal_gated_conv1D(x=_x, filters=filters, length=length, strides=strides)
else:
return causal_gated_conv1D(x=x, filters=filters, length=length, strides=strides)
def SwishNet(input_shape, classes, width_multiply=1):
_x_in = layers.Input(shape=input_shape)
# 1 block
_x_up = __causal_gated_conv1D(filters=16 * width_multiply, length=3)(_x_in)
_x_down = __causal_gated_conv1D(filters=16 * width_multiply, length=6)(_x_in)
_x = layers.Concatenate()([_x_up, _x_down])
# 2 block
_x_up = __causal_gated_conv1D(filters=8 * width_multiply, length=3)(_x)
_x_down = __causal_gated_conv1D(filters=8 * width_multiply, length=6)(_x)
_x = layers.Concatenate()([_x_up, _x_down])
# 3 block
_x_up = __causal_gated_conv1D(filters=8 * width_multiply, length=3)(_x)
_x_down = __causal_gated_conv1D(filters=8 * width_multiply, length=6)(_x)
_x_concat = layers.Concatenate()([_x_up, _x_down])
_x = layers.Add()([_x, _x_concat])
# 4 block
_x_loop1 = __causal_gated_conv1D(filters=16 * width_multiply, length=3, strides=3)(_x)
_x = layers.Add()([_x, _x_loop1])
# 5 block
_x_loop2 = __causal_gated_conv1D(filters=16 * width_multiply, length=3, strides=2)(_x)
_x = layers.Add()([_x, _x_loop2])
# 6 block
_x_loop3 = __causal_gated_conv1D(filters=16 * width_multiply, length=3, strides=2)(_x)
_x = layers.Add()([_x, _x_loop3])
# 7 block
_x_forward = __causal_gated_conv1D(filters=16 * width_multiply, length=3, strides=2)(_x)
# 8 block
_x_loop4 = __causal_gated_conv1D(filters=32 * width_multiply, length=3, strides=2)(_x)
# output
_x = layers.Concatenate()([_x_loop2, _x_loop3, _x_forward, _x_loop4])
_x = layers.Conv1D(filters=classes, kernel_size=1)(_x)
_x = layers.GlobalAveragePooling1D()(_x)
_x = layers.Activation("softmax")(_x)
model = models.Model(inputs=_x_in, outputs=_x)
return model
def SwishNetWide(input_shape, classes):
return SwishNet(input_shape=input_shape, classes=classes, width_multiply=2)
def SwishnetSlim(input_shape, classes):
return SwishNet(input_shape=input_shape, classes=classes, width_multiply=1)
if __name__ == "__main__":
import numpy as np
net = SwishNet(input_shape=(16, 20), classes=2)
net.summary()
print(net.predict(np.random.randn(2, 16, 20)))
|
'''
threaded_dl
===========
> threaded_dl links thread_count filename_format <flags>
links:
The name of a file containing links to download, one per line.
Uses pipeable to support !c clipboard, !i stdin lines of urls.
thread_count:
Integer number of threads to use for downloading.
filename_format:
A string that controls the names of the downloaded files. Uses Python's
brace-style formatting. Available formatters are:
- {basename}: The name of the file as indicated by the URL.
E.g. example.com/image.jpg -> image.jpg
- {extension}: The extension of the file as indicated by the URL, including
the dot. E.g. example.com/image.jpg -> .jpg
- {index}: The index of this URL within the sequence of all downloaded URLs.
Starts from 0.
- {now}: The unix timestamp at which this download job was started. It might
be ugly but at least it's unambiguous when doing multiple download batches
with similar filenames.
flags:
--bytespersecond X:
Limit the overall download speed to X bytes per second. Uses
bytestring.parsebytes to support strings like "1m", "500k", "2 mb", etc.
--headers X:
        HTTP headers to send with each request. Either a single Python dict
        literal, or an alternating sequence of key value pairs.
--timeout X:
Integer number of seconds to use as HTTP request timeout for each download.
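example:
    An illustrative invocation (the links file name here is hypothetical):
    > threaded_dl links.txt 4 "{now}_{index}_{basename}" --bytespersecond "2 mb" --timeout 30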
'''
import argparse
import ast
import os
import queue
import shutil
import sys
import threading
import time
from voussoirkit import betterhelp
from voussoirkit import bytestring
from voussoirkit import downloady
from voussoirkit import pipeable
from voussoirkit import ratelimiter
from voussoirkit import ratemeter
from voussoirkit import sentinel
from voussoirkit import threadpool
from voussoirkit import vlogging
log = vlogging.getLogger(__name__, 'threaded_dl')
downloady.log.setLevel(vlogging.WARNING)
THREAD_FINISHED = sentinel.Sentinel('thread finished')
def clean_url_list(urls):
for url in urls:
if isinstance(url, (tuple, list)):
(url, filename) = url
else:
filename = None
url = url.strip()
if not url:
continue
if url.startswith('#'):
continue
if filename:
yield (url, filename)
else:
yield url
def download_job(
url,
filename,
*,
bytespersecond=None,
headers=None,
meter=None,
timeout=None,
):
log.info(f'Starting "{filename}"')
downloady.download_file(
url,
filename,
bytespersecond=bytespersecond,
headers=headers,
ratemeter=meter,
timeout=timeout,
)
log.info(f'Finished "{filename}"')
def prepare_urls_filenames(urls, filename_format):
now = int(time.time())
if os.path.normcase(filename_format) != os.devnull:
index_digits = len(str(len(urls)))
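        # Zero-pad {index} so filenames sort lexicographically: e.g. with 150 urls,
        # index_digits is 3 and '{index}' becomes '{index:03d}' on the next line.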
filename_format = filename_format.replace('{index}', '{index:0%0dd}' % index_digits)
if '{' not in filename_format and len(urls) > 1:
filename_format += '_{index}'
if '{extension}' not in filename_format and '{basename}' not in filename_format:
filename_format += '{extension}'
urls_filenames = []
for (index, url) in enumerate(clean_url_list(urls)):
if isinstance(url, (tuple, list)):
(url, filename) = url
else:
basename = downloady.basename_from_url(url)
extension = os.path.splitext(basename)[1]
filename = filename_format.format(
basename=basename,
ext=extension,
extension=extension,
index=index,
now=now,
)
if os.path.exists(filename):
log.info(f'Skipping existing file "{filename}"')
continue
urls_filenames.append((url, filename))
return urls_filenames
def threaded_dl(
urls,
thread_count,
filename_format,
bytespersecond=None,
headers=None,
timeout=None,
):
urls_filenames = prepare_urls_filenames(urls, filename_format)
if not urls_filenames:
return
if bytespersecond is not None:
# It is important that we convert this to a Ratelimter now instead of
# passing the user's integer to downloady, because we want all threads
# to share a single limiter instance instead of each creating their
# own by the integer.
bytespersecond = ratelimiter.Ratelimiter(bytespersecond)
meter = ratemeter.RateMeter(span=5)
pool = threadpool.ThreadPool(thread_count, paused=True)
ui_stop_event = threading.Event()
ui_kwargs = {
'meter': meter,
'stop_event': ui_stop_event,
'pool': pool,
}
ui_thread = threading.Thread(target=ui_thread_func, kwargs=ui_kwargs, daemon=True)
ui_thread.start()
kwargss = []
for (url, filename) in urls_filenames:
kwargs = {
'function': download_job,
'kwargs': {
'bytespersecond': bytespersecond,
'filename': filename,
'headers': headers,
'meter': meter,
'timeout': timeout,
'url': url,
}
}
kwargss.append(kwargs)
pool.add_many(kwargss)
for job in pool.result_generator():
if job.exception:
ui_stop_event.set()
ui_thread.join()
raise job.exception
ui_stop_event.set()
ui_thread.join()
def ui_thread_func(meter, pool, stop_event):
if pipeable.OUT_PIPE:
return
while not stop_event.is_set():
width = shutil.get_terminal_size().columns
speed = meter.report()[2]
message = f'{bytestring.bytestring(speed)}/s | {pool.running_count} threads'
spaces = ' ' * (width - len(message) - 1)
pipeable.stderr(message + spaces, end='\r')
stop_event.wait(timeout=0.5)
def threaded_dl_argparse(args):
urls = pipeable.input(args.url_file, read_files=True, skip_blank=True, strip=True)
urls = [u.split(' ', 1) if ' ' in u else u for u in urls]
headers = args.headers
if headers is not None:
if len(headers) == 1 and headers[0].startswith('{'):
headers = ast.literal_eval(headers[0])
else:
keys = headers[::2]
vals = headers[1::2]
headers = {key: val for (key, val) in zip(keys, vals)}
bytespersecond = args.bytespersecond
if bytespersecond is not None:
bytespersecond = bytestring.parsebytes(bytespersecond)
threaded_dl(
urls,
bytespersecond=bytespersecond,
filename_format=args.filename_format,
headers=headers,
thread_count=args.thread_count,
timeout=args.timeout,
)
return 0
@vlogging.main_decorator
def main(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('url_file')
parser.add_argument('thread_count', type=int)
parser.add_argument('filename_format', nargs='?', default='{now}_{index}_{basename}')
parser.add_argument('--bytespersecond', default=None)
parser.add_argument('--timeout', default=15)
parser.add_argument('--headers', nargs='+', default=None)
parser.set_defaults(func=threaded_dl_argparse)
return betterhelp.single_main(argv, parser, __doc__)
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
required = [
]
vars = {}
with open((path.join(here, 'pydeclarative', '__about__.py'))) as fp:
exec(fp.read(), vars)
setup(
name='pydeclarative',
version=vars['__version__'],
description='Declarative style in Python.',
long_description=long_description,
author='Yegorov A.',
author_email='yegorov0725@yandex.ru',
url='https://github.com/yegorov/pydeclarative',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=required,
license='MIT',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='declarative function loop chain',
)
|
import sys
import time
import os
import json
import getpass
import platform
from termcolor import colored
platform = platform.system()
def cls():
if platform == 'Linux':
os.system('clear')
elif platform == 'Windows':
os.system('cls')
def read_file(file_name):
with open(file_name, 'r') as json_file:
return json.load(json_file)
def write_file(file_name, tasks):
with open(file_name, 'w') as json_file:
json.dump(tasks, json_file)
def load_task_time(task):
tasks = read_file(task_json)
if task not in tasks:
print colored("This task is not yet created", 'red')
sys.exit()
last_time = tasks[task]
return last_time
def update_task(task, new_time):
tasks = read_file(task_json)
tasks[task] = new_time
write_file(task_json, tasks)
def display_tasks():
tasks = read_file(task_json)
if tasks == {}:
print colored("No tasks available", "red")
return
i = 1
for key in tasks:
print colored(str(i), 'yellow') + " " + colored(key, 'green') + " " + colored(tasks[key], 'blue')
i += 1
def completed_task(task):
tasks = read_file(task_json)
del tasks[task]
write_file(task_json, tasks)
def twenty_hours(hr, mi, sec, task):
while(1):
try:
sys.stdout.write("\r")
if(sec == '60'):
sec = '00'
mi = str(int(mi) + 1)
if(mi == '60'):
mi = '00'
hr = str(int(hr) + 1)
#print hr
if(hr == '20'):
print 'You have completed learning ' +colored(task, 'green')+ ' for ' + colored(hr+' hours '+mi+' minutes and '+sec+' seconds', 'green')
completed_task(task)
sys.exit()
sec = str(int(sec) + 1)
if len(hr) == 1:
hr = '0' + hr
if len(mi) == 1:
mi = '0' + mi
if len(sec) == 1:
sec = '0' + sec
sys.stdout.write(colored(colored(task, 'yellow') + ' ' + hr+':'+mi+':'+sec, 'green'))
sys.stdout.flush()
time.sleep(1)
except KeyboardInterrupt:
try:
cls()
print colored("Press CTRL+C to Pause and Press C to Continue", 'cyan')
print colored('Paused', 'yellow')
x = raw_input()
if(x == 'c'):
cls()
print colored("Press CTRL+C to Pause and Press C to Continue", 'cyan')
print colored('\nResuming...', 'yellow')
time.sleep(1)
cls()
print colored("Press CTRL+C to Pause and Press C to Continue", 'cyan')
continue
except KeyboardInterrupt:
cls()
print 'You have learned ' + colored(task, 'yellow')+ ' for ' + colored(hr+' hours '+mi+' minutes and '+sec+' seconds', 'yellow')
print colored('Bye...', 'green')
new_time = hr + ':' + mi + ':' + sec
update_task(task, new_time)
break
def initial_screen():
cls()
print colored("Press CTRL+C to Pause and Press C to Continue", 'cyan')
def createJSON(task_json, task_folder):
'''user = getpass.getuser()
json_path = '/home/'+user+'/20H/task.json'''
if not os.path.exists(task_json):
os.system('mkdir ' + task_folder)
with open(task_json, 'w') as json_file:
json.dump({}, json_file)
def main():
flag = sys.argv[1]
if flag == '-d':
display_tasks()
elif flag == '-n':
task = sys.argv[2]
initial_screen()
tasks = read_file(task_json)
if task in tasks:
print colored("You already have a task named " + colored(task, "yellow"), "red")
option = raw_input(colored("Do you want to overwrite (y/n): "))
if option == 'y' or option == 'Y':
tasks[task] = '00:00:00'
write_file(task_json, tasks)
elif option == 'n' or option == 'N':
sys.exit()
else:
print colored("Invalid Option", "red")
sys.exit()
else:
tasks = read_file(task_json)
tasks[task] = '00:00:00'
write_file(task_json, tasks)
initial_screen()
twenty_hours('00', '00', '00', task)
elif flag == '-l':
task = sys.argv[2]
last_time = load_task_time(task)
hms = last_time.split(':')
cls()
print colored("Press CTRL+C to Pause and Press C to Continue", 'cyan')
twenty_hours(hms[0], hms[1], hms[2], task)
if platform == 'Linux':
task_folder = '/home/' + getpass.getuser() + '/20H'
task_json = task_folder + '/task.json'
elif platform == 'Windows':
task_folder = 'C:\\Users\\' + getpass.getuser() + '\\20H'
task_json = task_folder + '/task.json'
createJSON(task_json, task_folder)
main()
|
def with_correlated_columns(self,num_rows=100,x_range=5,slope=1,noise=1):
from random import random
table = self.select().with_columns("x",[],"y",[])
for _ in range(num_rows):
seed = random()
x = seed*x_range
y = seed*x_range*slope
y = y + (-1+2*random())*noise
table = table.with_row([x,y])
return table
def correlation(self,col1_name,col2_name):
import statistics
# get column arrays
col1 = self.column(col1_name)
col2 = self.column(col2_name)
# standardize units
col1_s = (col1 - statistics.mean(col1))/statistics.stdev(col1)
col2_s = (col2 - statistics.mean(col2))/statistics.stdev(col2)
# correlation is the mean product of standard units
return statistics.mean(col1_s*col2_s)
setattr(Table,"with_correlated_columns",with_correlated_columns)
setattr(Table,"correlation",correlation)
|
import pytest
from django.test.client import Client
from django.urls import reverse
from .view_config import PARAMETRIZED_PUBLIC_VIEWS
# pylint: disable=unused-argument
@pytest.mark.django_db
@pytest.mark.parametrize("view_name,post_data", PARAMETRIZED_PUBLIC_VIEWS)
def test_public_view_status_code(load_test_data, view_name, post_data):
"""
This test checks whether the given view return the correct status code for anonymous users
:param load_test_data: The fixture providing the test data (see :meth:`~tests.conftest.load_test_data`)
:type load_test_data: NoneType
:param view_name: The identifier of the view
:type view_name: str
:param post_data: The post data for this view
:type post_data: dict
"""
client = Client()
url = reverse(view_name)
if post_data:
response = client.post(url, data=post_data)
else:
response = client.get(url)
print(response.headers)
if post_data:
# Post-views should redirect after a successful operation
assert response.status_code == 302
else:
# Get-views should return 200
assert response.status_code == 200
|
#! /usr/bin/env python3
"""
retrain_emission.py: take an HDF5 file and segmentations, and output parameters of a mixture model.
"""
# std lib:
import argparse
import os
import sys
import random
from collections import defaultdict
from tqdm import tqdm
# numerics:
import numpy as np
import h5py
from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
def pool_reads(h, K):
"""
Select (up to) K random segmented reads from the dataset `h`. Return as a dictionary of pooled scaled samples
of form { <REGION_NAME> :: str -> <SAMPLES> :: NDArray(float) }.
"""
# collect scaled samples for each state:
pool = defaultdict(list)
    rnames = random.sample(list(h['scaled'].keys()), min(K, len(h['scaled'].keys())))
for rid in tqdm(rnames):
try:
assert(len(h['scaled'][rid]) == len(h['states'][rid]))
for k in range(len(h['states'][rid])):
pool[ h['states'][rid][k] ].append( h['scaled'][rid][k] )
except:
pass
# process into a dict of numpy arrays and return:
pool = dict(pool)
for k, v in pool.items():
pool[k] = np.array(v)
return pool
def retrain_emission(hdf_path, nreads, bayesian, components, verbose):
"""Retrain gaussian mixture model from parameters."""
# load dataset:
    hdf = h5py.File(hdf_path, 'r')
assert ('states' in hdf.keys() and 'scaled' in hdf.keys()), \
"[retrain_emission.py] ERR: both `samples` and `states` must be groups in the HDF5."
# select up to `nreads` random segmented reads from the dataset:
print("[retrain_emission.py] Collecting and pooling {} random reads (this may take a while...)".format(nreads))
segments = pool_reads(hdf, nreads)
# compute GMM parameters for each segment:
CONFIG = {
'ncomp': components,
'niter': 100,
'ninit': 5,
'verbose': (1 if verbose else 0),
'bayesian': bayesian
}
print("----- TRAINING CONFIG -----")
for k,v in CONFIG.items():
print("* {0} = {1}".format(k,v))
gmm = {}
for k,v in segments.items():
        if v.shape[0] < 10:
            print("[retrain_emission.py] Fewer than 10 samples for state {}; skipping...".format(k))
            continue
# train GMM:
if CONFIG['bayesian']:
gmm[k] = BayesianGaussianMixture(
n_components=CONFIG['ncomp'], max_iter=CONFIG['niter'], n_init=CONFIG['ninit'],
verbose=CONFIG['verbose']).fit(v.reshape(-1,1))
else:
gmm[k] = GaussianMixture(
n_components=CONFIG['ncomp'], max_iter=CONFIG['niter'], n_init=CONFIG['ninit'],
verbose=CONFIG['verbose']).fit(v.reshape(-1,1))
# print mixture model properties for each segment:
for k,v in gmm.items():
print("===== [{}] =====".format(k))
print("* Weights: {}".format(v.weights_))
print("* Means: {}".format(v.means_))
print("* Covariances: {}".format(v.covariances_))
hdf.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Train a mixture model.")
parser.add_argument("hdf_path",
help="Path to HDF5 file with segmented signal paths.")
parser.add_argument("--nreads", default=50, type=int,
help="Number of random reads to pool together and retrain upon. [50]")
parser.add_argument("--bayesian", default=False, action='store_true',
help="Use a dirichlet process mixture model. [False]")
parser.add_argument("--verbose", default=False, action='store_true',
help="Print verbose outputs during training. [False]")
parser.add_argument("--components", default=2, type=int,
help="If DPMM, max components; else fixed number of GMM components. [2]")
args = parser.parse_args()
assert (os.path.exists(args.hdf_path)), "File does not exist: {}".format(args.hdf_path)
retrain_emission(args.hdf_path, args.nreads, args.bayesian, args.components, args.verbose)
|
"""
Python provides a number of ways to perform printing. Research
how to print using the printf operator, the `format` string
method, and by using f-strings.
"""
x, y, z = 10, 2.24552, "I like turtles!"
# Using the printf operator (%), print the following feeding in the values of x,
# y, and z:
# x is 10, y is 2.25, z is "I like turtles!"
print('x is %d, y is %.2f, z is "%s"' % (x, y, z))
# Use the 'format' string method to print the same thing
print('x is {0}, y is {1:.2f}, z is "{2}"'.format(x, y, z))
# Finally, print the same thing using an f-string
print(f'x is {x}, y is {y:.2f}, z is "{z}"')
|
# Generated by Django 2.2.1 on 2020-06-02 00:33
from django.db import migrations
import manageXML.fields
class Migration(migrations.Migration):
dependencies = [
('manageXML', '0014_auto_20200517_1001'),
]
operations = [
migrations.AlterField(
model_name='historicallexeme',
name='lexeme',
field=manageXML.fields.BinaryCharField(max_length=250),
),
migrations.AlterField(
model_name='lexeme',
name='lexeme',
field=manageXML.fields.BinaryCharField(max_length=250),
),
]
|
"""
Developer: Rajat Shinde
Api Sources-
1. [nsetools](https://nsetools.readthedocs.io/en/latest/index.html)
2. [nsepy](https://nsepy.readthedocs.io/en/latest/)
3. [NSE India](http://www.nseindia.com/)
"""
import nsetools as nse
import streamlit as st
import nsepy
from nsepy import get_history
from datetime import date
import datetime
from nsetools import Nse
import pandas as pd
import numpy as np
st.write("""# ANSESAI: App for NSE Stocks And Indices""")
st.write("""### Developed by- [Rajat Shinde](http://home.iitb.ac.in/~rajatshinde)""")
st.write("##### *Note: Press Generate Charts button mutiple times if data is not fetched.*")
st.write(" ")
# #Importing Nse class instance
nse = Nse()
#Get advances decline information
if st.sidebar.button('Get Advances-Declines'):
st.table(nse.get_advances_declines())
#Select among Traded Stock Codes and Index Codes
codeList = ['Traded Stock Codes', 'Index Codes']
codeSelect = st.sidebar.selectbox(
'Which code do you want to analyze?',
codeList)
# st.write('Selected Option:', codeSelect)
all_stock_codes = nse.get_stock_codes()
all_stock_codes_values = list(nse.get_stock_codes().values())
if(codeSelect == 'Traded Stock Codes'):
option = st.sidebar.selectbox(
'Which Stock do you want to analyze?',
all_stock_codes_values[1:])
# st.write('You have selected:', option)
if st.sidebar.button('Get Stock Quote'):
# st.write(all_stock_codes)
reqKey = [key for (key, value) in all_stock_codes.items() if value == option]
st.write(nse.get_quote(reqKey[0]))
else:
option = st.sidebar.selectbox(
'Which Index do you want to analyze?',
list(nse.get_index_list()), index=1)
#st.write('You have selected:', option)
if st.sidebar.button('Get Index Quote'):
# st.write(all_stock_codes[option])
st.write(nse.get_index_quote(option))
#Button to get Open Price, Closed Price, High and Low
#TODO: Replace data selection by slider
startDate = st.sidebar.date_input("Select start date",datetime.date(2020, 3, 6))
endDate = st.sidebar.date_input("Select end date",datetime.date(2020, 7, 6))
if st.sidebar.button('Generate Charts'):
st.write("Fetching data for the %s %s!"%(option, codeSelect[:-1]))
if(codeSelect == 'Traded Stock Codes'):
data = get_history(symbol=option, start=startDate, end=endDate)
else:
data = get_history(symbol=option, start=startDate, end=endDate, index=True)
st.write("""### Closing Price Chart""")
st.line_chart(data.Close)
st.write("""### Opening Price Chart""")
st.line_chart(data.Open)
st.write("""### High Price Chart""")
st.bar_chart(data.High)
st.write("""### Low Price Chart""")
st.bar_chart(data.Low)
st.write("""### Opening/Closing Price Chart""")
arr1 = np.vstack([data.Open, data.Close])
st.line_chart(pd.DataFrame(arr1.T, columns=["Opening", "Closing"]))
st.write("""### High/Low Price Chart""")
arr2 = np.vstack([data.High, data.Low])
st.line_chart(pd.DataFrame(arr2.T, columns=["High", "Low"]))
st.write("""### Combined Price Chart""")
arr = np.vstack([data.Open, data.Close, data.High, data.Low])
st.line_chart(pd.DataFrame(arr.T, columns=["Opening", "Closing", "High", "Low"]))
st.write("""### Volume""")
st.line_chart(data.Volume)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import logging
import time
from c8ydm.core.apt_package_manager import AptPackageManager
from c8ydm.framework.modulebase import Initializer, Listener
from c8ydm.framework.smartrest import SmartRESTMessage
class SoftwareManager(Listener, Initializer):
logger = logging.getLogger(__name__)
apt_package_manager = AptPackageManager()
def group(self, seq, sep):
result = [[]]
for e in seq:
#logging.info("e: "+str(e) +" sep: " + str(sep))
if sep not in str(e):
result[-1].append(e)
else:
result[-1].append(e[:e.find(sep)])
result.append([])
if result[-1] == []:
result.pop() # thx iBug's comment
return result
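    # Illustrative behaviour of group() (values are made up):
    #   group(['dev1', 'op_a\ntrailing', 'op_b'], '\n') -> [['dev1', 'op_a'], ['op_b']]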
def handleOperation(self, message):
try:
if 's/ds' in message.topic and message.messageId == '528':
# When multiple operations received just take the first one for further processing
#self.logger.debug("message received :" + str(message.values))
messages = self.group(message.values, '\n')[0]
deviceId = messages.pop(0)
self.logger.info('Software update for device ' +
deviceId + ' with message ' + str(messages))
executing = SmartRESTMessage(
's/us', '501', ['c8y_SoftwareUpdate'])
self.agent.publishMessage(executing)
softwareToInstall = [messages[x:x + 4]
for x in range(0, len(messages), 4)]
errors = self.apt_package_manager.install_software(
softwareToInstall, True)
self.logger.info('Finished all software update')
if len(errors) == 0:
# finished without errors
finished = SmartRESTMessage(
's/us', '503', ['c8y_SoftwareUpdate'])
else:
# finished with errors
finished = SmartRESTMessage(
's/us', '502', ['c8y_SoftwareUpdate', ' - '.join(errors)])
self.agent.publishMessage(finished)
self.agent.publishMessage(
self.apt_package_manager.getInstalledSoftware(False))
if 's/ds' in message.topic and message.messageId == '516':
# When multiple operations received just take the first one for further processing
#self.logger.debug("message received :" + str(message.values))
messages = self.group(message.values, '\n')[0]
#self.logger.info("message processed:" + str(messages))
deviceId = messages.pop(0)
self.logger.info('Software update for device ' +
deviceId + ' with message ' + str(messages))
executing = SmartRESTMessage(
's/us', '501', ['c8y_SoftwareList'])
self.agent.publishMessage(executing)
softwareToInstall = [messages[x:x + 3]
for x in range(0, len(messages), 3)]
                errors = self.apt_package_manager.install_software(
                    softwareToInstall, True)
self.logger.info('Finished all software update')
if len(errors) == 0:
# finished without errors
finished = SmartRESTMessage(
's/us', '503', ['c8y_SoftwareList'])
else:
# finished with errors
finished = SmartRESTMessage(
's/us', '502', ['c8y_SoftwareList', ' - '.join(errors)])
self.agent.publishMessage(finished)
self.agent.publishMessage(
self.apt_package_manager.getInstalledSoftware(False))
except Exception as e:
self.logger.exception(e)
failed = SmartRESTMessage(
's/us', '502', ['c8y_SoftwareList', str(e)])
self.agent.publishMessage(failed)
failed = SmartRESTMessage(
's/us', '502', ['c8y_SoftwareUpdate', str(e)])
self.agent.publishMessage(failed)
def getSupportedOperations(self):
return ['c8y_SoftwareUpdate', 'c8y_SoftwareList']
def getSupportedTemplates(self):
return []
def getMessages(self):
installedSoftware = self.apt_package_manager.getInstalledSoftware(False)
return [installedSoftware]
|
import sys
class ParseError(Exception):
pass
class MissingArgumentError(Exception):
pass
class Options(object):
def __init__(self, args):
if len(args) < 2:
raise MissingArgumentError
parts = args[0].split('/')
if len(parts) != 2:
raise ParseError
self.repository = parts[1]
self.username = parts[0]
self.emails = args[1:]
def parse(argv=None):
    if argv is None:
argv = sys.argv[1:]
return Options(argv)
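
# Usage sketch (illustrative values only):
#   opts = parse(['someuser/somerepo', 'dev@example.com'])
#   opts.username   == 'someuser'
#   opts.repository == 'somerepo'
#   opts.emails     == ['dev@example.com']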
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import print_function, absolute_import
import os
import re
from click_project.config import config
from click_project.decorators import command, option, argument
from click_project.lib import cd, call
@command(ignore_unknown_options=True, handle_dry_run=True)
@option('--revision', default='HEAD', help="Revision to checkout")
@option('--username', default=None, help="The user name")
@option('--password', default=None, help="The user password")
@option('--auth-cache/--no-auth-cache', default=False, help="Cache authentication")
@option('--interactive/--no-interactive', default=False, help="Interactive prompting")
@option('--trust-server-cert/--no-trust-server-cert', default=True, help="Accept unknown certificates")
@argument('url', help="The repository URL")
@argument('directory', required=False, help="The destination directory")
@argument('args', nargs=-1, help="Extra arguments to pass to the svn program")
def svn_sync(revision, username, password, url, auth_cache, interactive, trust_server_cert, directory, args):
"""Retrieve and/or update a svn repository"""
directory = directory or re.split('[:/]', url)[-1]
args = list(args)
if username is not None:
args += ['--username', username]
if password is not None:
args += ['--password', password]
if not auth_cache:
args += ['--no-auth-cache']
if not interactive:
args += ['--non-interactive']
if trust_server_cert:
args += ['--trust-server-cert']
if os.path.exists(directory):
with cd(directory):
call(['svn', 'up', '--revision', revision] + args, env=config.old_env)
else:
call(['svn', 'checkout', '--revision', revision] + args + [url, directory], env=config.old_env)
|
def modfunc(f):
    # modfunc simply invokes the callable it receives.
f() # call back to caller
|
import sys
if sys.argv[0].endswith("__main__.py"):
sys.argv[0] = "python -m tktable"
from . import _test as main
main()
|
"""Tests for sanskrit_parser
"""
|
import os
import subprocess
import numpy as np
import re
from collections import OrderedDict
from example.cbnu.utils import get_interval
data_source_classes = OrderedDict()
possible_modes = ['one-file', 'multi-file', 'one-dir', 'multi-dir', 'other']
import neo
class DataSourceBase:
gui_params = None
def __init__(self):
# important total_channel != nb_channel because nb_channel=len(channels)
self.total_channel = None
self.sample_rate = None
self.nb_segment = None
self.dtype = None
self.bit_to_microVolt = None
def get_segment_shape(self, seg_num):
raise NotImplementedError
def get_channel_names(self):
raise NotImplementedError
def get_signals_chunk(self, seg_num=0, i_start=None, i_stop=None):
raise NotImplementedError
def load(self):
pass
def get_channel_names(stream):
channel_names = []
ref_label = '15'
for info in stream.channel_infos.values():
label = stream.channel_infos[info.channel_id].label
if label == 'Ref':
all_labels = [info.label for info in stream.channel_infos.values()]
assert ref_label not in all_labels, \
"Reference electrode was assumed to be {}. But this label " \
"is already in use.".format(ref_label)
label = ref_label
channel_names.append('ch' + label)
return channel_names
def get_all_channel_data(stream):
num_channels, num_timesteps = stream.channel_data.shape
# Data type is fixed to float because ``stream.get_channel_in_range``
# returns traces in Volt. Otherwise, could use
# ``stream.channel_data.dtype``, which would be 'int32'.
dtype = 'float32'
scales = []
offsets = []
sample_rates = []
channel_ids = []
row_ids = []
for k in stream.channel_infos.keys():
info = stream.channel_infos[k]
channel_ids.append(info.channel_id)
row_ids.append(info.row_index)
scales.append(info.adc_step.magnitude)
offsets.append(info.get_field('ADZero'))
sample_rates.append(int(info.sampling_frequency.magnitude))
assert np.array_equiv(sample_rates, sample_rates[0]), \
"Recording contains different sample rates."
is_parallelizable = (np.array_equiv(scales, scales[0]) and
np.array_equiv(offsets, offsets[0]))
is_permuted = not np.array_equal(channel_ids, row_ids)
if is_parallelizable:
if is_permuted:
channel_data_permuted = np.array(stream.channel_data)
# If the experimenter disabled recording certain electrodes,
# num_channels may be less than the total number of electrodes,
# resulting in an index error when writing to channel_data.
min_num_channels = max(num_channels, np.max(channel_ids) + 1)
channel_data = np.zeros((min_num_channels, num_timesteps), dtype)
channel_data[channel_ids] = channel_data_permuted[row_ids]
else:
channel_data = np.array(stream.channel_data, dtype)
channel_data = np.transpose(channel_data)
np.subtract(channel_data, offsets[0], channel_data)
np.multiply(channel_data, scales[0], channel_data)
else:
# Todo: This case does not handle the situation where num_channels is
# less than the full number of electrodes in the MEA.
import sys
channel_data = np.zeros((num_timesteps, num_channels), dtype)
for i_channel in range(num_channels):
channel_data[:, i_channel] = stream.get_channel_in_range(
i_channel, 0, num_timesteps - 1)[0]
status = (i_channel + 1) / num_channels
sys.stdout.write('\r{:>7.2%}'.format(status))
sys.stdout.flush()
print('')
return channel_data, sample_rates[0]
class H5DataSource(DataSourceBase):
"""DataSource from h5 files."""
mode = 'multi-file'
def __init__(self, filenames, gui=None):
DataSourceBase.__init__(self)
self.gui = gui
self.log = print if self.gui is None else self.gui.log
self.filenames = [filenames] if isinstance(filenames, str) \
else filenames
for filename in self.filenames:
assert os.path.exists(filename), \
"File {} could not be found.".format(filename)
self.nb_segment = len(self.filenames)
self.array_sources = []
self.channel_names = None
self.bit_to_microVolt = 1 # Signal is already in uV.
def load(self):
from McsPy.McsData import RawData
start_time = int(self.gui.config['start_time'] * 1e6)
stop_time = int(self.gui.config['stop_time'] * 1e6)
sample_rates = []
dtypes = []
num_channels = []
channel_names = []
for filename in self.filenames:
self.log("Loading .h5 file: {}".format(filename))
data = RawData(filename)
assert len(data.recordings) == 1, \
"Can only handle a single recording per file."
electrode_data = None
stream_id = None
analog_streams = data.recordings[0].analog_streams
for stream_id, stream in analog_streams.items():
if stream.data_subtype == 'Electrode':
electrode_data = stream
break
assert electrode_data is not None, "Electrode data not found."
traces, sample_rate = get_all_channel_data(electrode_data)
us_per_tick = int(1e6 / sample_rate)
start_tick = start_time // us_per_tick
stop_tick = stop_time // us_per_tick
full_duration = len(traces)
if stop_tick >= full_duration:
stop_tick = full_duration
self.array_sources.append(traces[start_tick:stop_tick])
sample_rates.append(sample_rate)
dtypes.append(traces.dtype)
num_channels.append(traces.shape[1])
channel_names_of_file = get_channel_names(electrode_data)
channel_names.append(channel_names_of_file)
trigger_data = []
trigger_times = []
event_streams = data.recordings[0].event_streams
# First, try loading trigger data from digital event stream.
if event_streams is not None:
for event_stream in event_streams.values():
for d in event_stream.event_entity.values():
if d.info.label == 'Digital Event Detector Event' or \
'Single Pulse Start' in d.info.label:
tr_times = d.data[0]
trigger_ticks = tr_times // us_per_tick
tr_data = np.zeros(full_duration)
tr_data[trigger_ticks] = 1
trigger_data.append(tr_data[start_tick:stop_tick])
trigger_times.append(
get_interval(tr_times, start_time, stop_time))
# If triggers not stored as digital events, try analog stream.
if len(trigger_times) == 0:
analog_stream_id = (stream_id + 1) % 2
tr_data = analog_streams[analog_stream_id].channel_data[0]
trigger_ticks = np.flatnonzero(np.diff(tr_data) >
np.abs(np.min(tr_data)))
tr_times = trigger_ticks * us_per_tick
trigger_times.append(
get_interval(tr_times, start_time, stop_time))
trigger_data.append(tr_data[start_tick:stop_tick])
# If no triggers are available (e.g. spontaneous activity), create
# null array.
if len(trigger_times) == 0:
trigger_times.append(np.array([]))
trigger_data.append(np.zeros(stop_tick - start_tick))
# Save stimulus as compressed numpy file for later use in GUI.
for i in range(len(trigger_data)):
dirname, basename = os.path.split(filename)
basename, _ = os.path.splitext(basename)
np.savez_compressed(
os.path.join(dirname, '{}_stimulus{}'.format(basename, i)),
times=trigger_times[i], data=trigger_data[i])
# Save another copy as text file for easier access in matlab.
np.savetxt(os.path.join(
dirname, '{}_trigger_times{}.txt'.format(basename, i)),
trigger_times[i], fmt='%d')
np.savetxt(os.path.join(
dirname, '{}_trigger_data{}.txt'.format(basename, i)),
trigger_data[i], fmt='%d')
# Make sure that every file uses the same sample rate, dtype, etc.
assert np.array_equiv(sample_rates, sample_rates[0]), \
"Recording contains different sample rates."
assert np.array_equiv(dtypes, dtypes[0]), \
"Recording contains different dtypes."
assert np.array_equiv(num_channels, num_channels[0]), \
"Recording contains different number of channels."
assert np.array_equiv(channel_names, channel_names[0]), \
"Recording contains different channel names."
self.total_channel = num_channels[0]
self.sample_rate = sample_rates[0]
self.dtype = dtypes[0]
self.channel_names = channel_names[0]
self.log("Finished initializing DataSource.")
def get_segment_shape(self, seg_num):
return self.array_sources[seg_num].shape
def get_signals_chunk(self, seg_num=0, i_start=None, i_stop=None):
return self.array_sources[seg_num][i_start:i_stop, :]
def get_channel_names(self):
from McsPy.McsData import RawData
if self.channel_names is None:
channel_names = []
for filename in self.filenames:
data = RawData(filename)
stream = data.recordings[0].analog_streams[0]
channel_names.append(get_channel_names(stream))
assert np.array_equiv(channel_names, channel_names[0]), \
"Recording contains different channel names."
self.channel_names = channel_names[0]
return self.channel_names
class MsrdDataSource(H5DataSource):
"""DataSource from MCS2100."""
def __init__(self, filenames, gui=None):
log = print if gui is None else gui.log
if isinstance(filenames, str):
filenames = [filenames]
filenames_h5 = []
for filename in filenames:
assert os.path.exists(filename), \
"File {} could not be found.".format(filename)
msg = "Converting file from .msrd to .h5: {}".format(filename)
log(msg)
basename = os.path.splitext(filename)[0]
filenames_h5.append(basename + '.h5')
subprocess.run(["MCDataConv", "-t", "hdf5", basename + '.msrs'])
log("Done converting.")
H5DataSource.__init__(self, filenames_h5, gui)
class McdDataSource(H5DataSource):
"""DataSource from MCS1060."""
def __init__(self, filenames, gui=None):
log = print if gui is None else gui.log
if isinstance(filenames, str):
filenames = [filenames]
filenames_h5 = []
for filename in filenames:
assert os.path.exists(filename), \
"File {} could not be found.".format(filename)
msg = "Converting file from .mcd to .h5: {}".format(filename)
log(msg)
filenames_h5.append(os.path.splitext(filename)[0] + '.h5')
subprocess.run(["MCDataConv", "-t", "hdf5", filename])
log("Done converting.")
H5DataSource.__init__(self, filenames_h5, gui)
class InMemoryDataSource(DataSourceBase):
"""
DataSource in memory numpy array.
This is for debugging or fast testing.
"""
mode = 'other'
def __init__(self, nparrays=[], sample_rate=None):
DataSourceBase.__init__(self)
self.nparrays = nparrays
self.nb_segment = len(self.nparrays)
self.total_channel = self.nparrays[0].shape[1]
self.sample_rate = sample_rate
self.dtype = self.nparrays[0].dtype
def get_segment_shape(self, seg_num):
full_shape = self.nparrays[seg_num].shape
return full_shape
def get_signals_chunk(self, seg_num=0, i_start=None, i_stop=None):
data = self.nparrays[seg_num][i_start:i_stop, :]
return data
def get_channel_names(self):
return ['ch{}'.format(i) for i in range(self.total_channel)]
data_source_classes['InMemory'] = InMemoryDataSource
class RawDataSource(DataSourceBase):
"""
DataSource from raw binary file. Easy case.
"""
mode = 'multi-file'
gui_params = [
{'name': 'dtype', 'type': 'list', 'values':['int16', 'uint16', 'float32', 'float64']},
{'name': 'total_channel', 'type': 'int', 'value':1},
{'name': 'sample_rate', 'type': 'float', 'value':10000., 'step': 1000., 'suffix': 'Hz', 'siPrefix': True},
{'name': 'offset', 'type': 'int', 'value':0},
]
def __init__(self, filenames=[], dtype='int16', total_channel=0,
sample_rate=0., offset=0, bit_to_microVolt=None, channel_names=None):
DataSourceBase.__init__(self)
self.filenames = filenames
if isinstance(self.filenames, str):
self.filenames = [self.filenames]
        assert all([os.path.exists(f) for f in self.filenames]), 'some of the files do not exist'
self.nb_segment = len(self.filenames)
self.total_channel = total_channel
self.sample_rate = sample_rate
self.dtype = np.dtype(dtype)
if bit_to_microVolt == 0.:
bit_to_microVolt = None
self.bit_to_microVolt = bit_to_microVolt
if channel_names is None:
channel_names = ['ch{}'.format(i) for i in range(self.total_channel)]
self.channel_names = channel_names
self.array_sources = []
for filename in self.filenames:
data = np.memmap(filename, dtype=self.dtype, mode='r', offset=offset)
#~ data = data[:-(data.size%self.total_channel)]
data = data.reshape(-1, self.total_channel)
self.array_sources.append(data)
def get_segment_shape(self, seg_num):
full_shape = self.array_sources[seg_num].shape
return full_shape
def get_signals_chunk(self, seg_num=0, i_start=None, i_stop=None):
data = self.array_sources[seg_num][i_start:i_stop, :]
return data
def get_channel_names(self):
return self.channel_names
data_source_classes['RawData'] = RawDataSource
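
# Minimal usage sketch for RawDataSource (file name and parameters are hypothetical):
#   ds = RawDataSource(filenames=['recording.raw'], dtype='int16',
#                      total_channel=32, sample_rate=20000.)
#   chunk = ds.get_signals_chunk(seg_num=0, i_start=0, i_stop=1024)  # -> (1024, 32)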
import neo.rawio
io_gui_params = {
'RawBinarySignal':[
{'name': 'dtype', 'type': 'list', 'values':['int16', 'uint16', 'float32', 'float64']},
{'name': 'nb_channel', 'type': 'int', 'value':1},
{'name': 'sampling_rate', 'type': 'float', 'value':10000., 'step': 1000., 'suffix': 'Hz', 'siPrefix': True},
{'name': 'bytesoffset', 'type': 'int', 'value':0},
],
}
# hook for some neo.rawio that have problem with TDC (multi sampling rate or default params)
neo_rawio_hooks = {}
class Intan(neo.rawio.IntanRawIO):
def _parse_header(self):
neo.rawio.IntanRawIO._parse_header(self)
sig_channels = self.header['signal_channels']
sig_channels = sig_channels[sig_channels['group_id']==0]
self.header['signal_channels'] = sig_channels
def _get_signal_size(self, block_index, seg_index, channel_indexes):
if channel_indexes is None:
channel_indexes = slice(None)
assert np.unique(self.header['signal_channels'][channel_indexes]['group_id']).size == 1
channel_names = self.header['signal_channels'][channel_indexes]['name']
chan_name = channel_names[0]
size = self._raw_data[chan_name].size
return size
neo_rawio_hooks['Intan'] = Intan
class NeoRawIOAggregator(DataSourceBase):
"""
    Wrap and aggregate several neo.rawio readers in one class.
"""
gui_params = None
rawio_class = None
def __init__(self, **kargs):
DataSourceBase.__init__(self)
self.rawios = []
if 'filenames' in kargs:
filenames= kargs.pop('filenames')
self.rawios = [self.rawio_class(filename=f, **kargs) for f in filenames]
elif 'dirnames' in kargs:
dirnames= kargs.pop('dirnames')
self.rawios = [self.rawio_class(dirname=d, **kargs) for d in dirnames]
else:
raise(ValueError('Must have filenames or dirnames'))
self.sample_rate = None
self.total_channel = None
self.sig_channels = None
nb_seg = 0
self.segments = {}
for rawio in self.rawios:
rawio.parse_header()
            assert not rawio._several_channel_groups, 'several sample rates for signals'
assert rawio.block_count() ==1, 'Multi block RawIO not implemented'
for s in range(rawio.segment_count(0)):
#nb_seg = absolut seg index and s= local seg index
self.segments[nb_seg] = (rawio, s)
nb_seg += 1
if self.sample_rate is None:
self.sample_rate = rawio.get_signal_sampling_rate()
else:
assert self.sample_rate == rawio.get_signal_sampling_rate(), 'bad joke different sample rate!!'
sig_channels = rawio.header['signal_channels']
if self.sig_channels is None:
self.sig_channels = sig_channels
self.total_channel = len(sig_channels)
else:
assert np.all(sig_channels==self.sig_channels), 'bad joke different channels!'
self.nb_segment = len(self.segments)
self.dtype = np.dtype(self.sig_channels['dtype'][0])
units = sig_channels['units'][0]
#~ assert 'V' in units, 'Units are not V, mV or uV'
if units =='V':
self.bit_to_microVolt = self.sig_channels['gain'][0]*1e-6
elif units =='mV':
self.bit_to_microVolt = self.sig_channels['gain'][0]*1e-3
elif units =='uV':
self.bit_to_microVolt = self.sig_channels['gain'][0]
else:
self.bit_to_microVolt = None
def get_segment_shape(self, seg_num):
rawio, s = self.segments[seg_num]
l = rawio.get_signal_size(0, s)
return l, self.total_channel
def get_channel_names(self):
return self.sig_channels['name'].tolist()
def get_signals_chunk(self, seg_num=0, i_start=None, i_stop=None):
rawio, s = self.segments[seg_num]
return rawio.get_analogsignal_chunk(block_index=0, seg_index=s,
i_start=i_start, i_stop=i_stop)
#Construct the list with taking local class with hooks dict
rawiolist = []
for rawio_class in neo.rawio.rawiolist:
name = rawio_class.__name__.replace('RawIO', '')
if name in neo_rawio_hooks:
rawio_class = neo_rawio_hooks[name]
rawiolist.append(rawio_class)
if neo.rawio.RawBinarySignalRawIO in rawiolist:
    # to avoid a bug in readthedocs builds with mock
RawBinarySignalRawIO = rawiolist.pop(rawiolist.index(neo.rawio.RawBinarySignalRawIO))
#~ rawiolist.insert(0, RawBinarySignalRawIO)
for rawio_class in rawiolist:
name = rawio_class.__name__.replace('RawIO', '')
class_name = name+'DataSource'
datasource_class = type(class_name,(NeoRawIOAggregator,), { })
datasource_class.rawio_class = rawio_class
if rawio_class.rawmode in ('multi-file', 'one-file'):
#multi file in neo have another meaning
datasource_class.mode = 'multi-file'
elif rawio_class.rawmode in ('one-dir', ):
datasource_class.mode = 'multi-dir'
else:
continue
#gui stuffs
if name in io_gui_params:
datasource_class.gui_params = io_gui_params[name]
data_source_classes[name] = datasource_class
#~ print(datasource_class, datasource_class.mode )
data_source_classes['mcd'] = McdDataSource
data_source_classes['msrd'] = MsrdDataSource
data_source_classes['h5'] = H5DataSource
#TODO implement KWIK and OpenEphys
#https://open-ephys.atlassian.net/wiki/display/OEW/Data+format
# https://github.com/open-ephys/analysis-tools/tree/master/Python3
|
from django.apps import AppConfig
class SoundStoreConfig(AppConfig):
name = 'sound_store'
|
import cv2
import cvt_utils.tools as tl
# read the image as grayscale (flag 0, i.e. no BGR channels)
img = cv2.imread("code/sudoku.png", 0)
assert len(img.shape) == 2 # only X, Y, without BGR channels
min_th = 127
max_th = 255
# basic
get_basic_th = lambda coef: list(cv2.threshold(img, min_th, max_th, coef))[1]
basic_binary_th_img1 = tl.concat_hor((img, get_basic_th(cv2.THRESH_BINARY)))
basic_binary_th_img2 = tl.concat_hor((img, get_basic_th(cv2.THRESH_BINARY_INV)))
basic_binary_th_img = tl.concat_ver((basic_binary_th_img1, basic_binary_th_img2))
cv2.imwrite("code/sudoku-basic-binary-th.png", basic_binary_th_img)
cv2.imshow("Sudoku", basic_binary_th_img)
cv2.waitKey(0)
# adaptive
block_size = 11 # 11 x 11
th_adaptive_mean = cv2.adaptiveThreshold(img, max_th, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, block_size, 2)
th_adaptive_gaus = cv2.adaptiveThreshold(img, max_th, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, block_size, 2)
row1 = tl.concat_hor((img, th_adaptive_mean))
row2 = tl.concat_hor((img, th_adaptive_gaus))
final_img = tl.concat_ver((row1, row2))
cv2.imwrite("code/sudoku-adaptive-th.png", final_img)
cv2.imshow("Sudoku-2", final_img)
cv2.waitKey(0)
# otsu
# Otsu's thresholding
_, th2 = cv2.threshold(img, 0, max_th, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Otsu's thresholding after Gaussian filtering
blur = cv2.GaussianBlur(img, (5, 5), 1)
_, th3 = cv2.threshold(blur, 0, max_th, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
row1 = tl.concat_hor((img, th2))
row2 = tl.concat_hor((img, th3))
final_img = tl.concat_ver((row1, row2))
cv2.imwrite("code/sudoku-otsu-th.png", final_img)
cv2.imshow("Sudoku-3", final_img)
cv2.waitKey(0)
ret_o, th_o = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
print "Otsu threshold %d" % ret_o
|
from model.product import Product
class Item:
def __init__(self, product: Product, quantity: int):
self.__product = product
self.__quantity = quantity
def get_total_price(self):
return self.__product.get_price() * self.__quantity
|
cumulative_sum = []
list_data = 0
for i in range(1, 21):
    list_data = list_data + i
    cumulative_sum.append(list_data)
print(cumulative_sum)
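# Note: the k-th entry is the triangular number k * (k + 1) // 2,
# so the last value printed above is 20 * 21 // 2 == 210.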
|
"""Program to draw Mandelbrot fractals: the graphical user interface.
Author: Lars van den Haak and Tom Verhoeff
Copyright (c) 2020 - Eindhoven University of Technology, The Netherlands
This software is made available under the terms of the MIT License.
* Contributor 1: Harry Verspagen
* TU/e ID number 1: 1484575
* Contributor 2: Sander Debets
* TU/e ID number 2: 1252402
* Date: 04-05-2020
"""
from PIL import Image, ImageTk # type: ignore
import tkinter as tk
from mandel import *
from typing import Callable
def squares(px: int, py: int, c1: Color = GREEN, c2: Color = BLUE) -> Color:
"""Colors the screen in squares of 20 pixels
:param: px: pixel x-coordinate
:param: py: pixel y-coordinate
:param: c1: Color of the first type of square
:param: c2: Color of the second type of square
:return: Color for the input pixel
"""
if px // 20 % 2 == py // 20 % 2:
c = c1
else:
c = c2
return c
class GUI:
"""A class where we make our Graphical User Interface based on TkInter
"""
def __init__(self) -> None:
self.image = None
self.window = tk.Tk()
self.canvas = tk.Label(self.window, image=None)
self.canvas.pack(side="bottom", fill="both", expand="yes")
def make_image(gui: GUI, colorize: Callable[[int, int], Color] = squares) -> None:
"""Puts an image on screen created by a function
:param: gui: An instance from the GUI class
:param: colorize: A function that gives a color to each pixel
"""
img = Image.new('RGB', (600, 600))
for x in range(0, 600):
for y in range(0, 600):
img.putpixel((x, y), colorize(x, y))
tkimg = ImageTk.PhotoImage(img)
# Save the image to the gui class, otherwise it gets garbage collected
gui.image = tkimg
canvas = gui.canvas
canvas.configure(image=tkimg)
canvas.pack(side="bottom", fill="both", expand="yes")
|
import numpy as np
import tables
import torch
from torch.utils.data import Dataset, Subset, TensorDataset
class RadioML2018(Dataset):
def __init__(self, path, transform=None, target_transform=None):
super().__init__()
self.path = path
self.transform = transform
self.target_transform = target_transform
self._file = None
@property
def labels(self):
self.open()
return np.array(self._file.root.Y)
@property
def snr(self):
self.open()
return np.array(self._file.root.Z)
def __getitem__(self, index):
self.open()
x, y, z = self._file.root.X[index], self._file.root.Y[index], self._file.root.Z[index]
x, y, z = torch.from_numpy(x), torch.as_tensor(y), torch.as_tensor(z)
if self.transform is not None:
x = self.transform(x)
if self.target_transform is not None:
y = self.target_transform(y)
return x, y, z
def __len__(self):
try:
return len(self._file.root.X)
except AttributeError:
self.open()
length = len(self._file.root.X)
self.close()
return length
def open(self):
if self._file is None:
self._file = tables.open_file(self.path)
def close(self):
if self._file is not None:
self._file.close()
self._file = None
def create_subset_indices(filepath, min_snr=None, max_snr=None):
with tables.open_file(filepath) as f:
snrs = np.array(f.root.Z).squeeze()
if min_snr is None:
min_snr = np.iinfo(snrs.dtype).min
if max_snr is None:
max_snr = np.iinfo(snrs.dtype).max
return np.argwhere((snrs >= min_snr) & (snrs <= max_snr)).squeeze()
def load_dataset(path, min_snr=None, max_snr=None, in_memory=False):
if in_memory:
with tables.open_file(path) as f:
x, y, z = np.array(f.root.X), np.array(f.root.Y), np.array(f.root.Z)
ds = TensorDataset(torch.from_numpy(x), torch.from_numpy(y), torch.from_numpy(z))
else:
ds = RadioML2018(path)
if min_snr is not None:
indices = create_subset_indices(path, min_snr, max_snr=max_snr)
ds = Subset(ds, indices=indices.tolist())
return ds
__all__ = ["RadioML2018", "load_dataset"]
|
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from config_tempest.services.base import VersionedService
class LoadBalancerService(VersionedService):
def set_versions(self):
super(LoadBalancerService, self).set_versions(top_level=False)
def set_default_tempest_options(self, conf):
conf.set('load_balancer', 'enable_security_groups', 'True')
conf.set('load_balancer', 'admin_role', 'admin')
conf.set('load_balancer', 'RBAC_test_type', 'owner_or_admin')
@staticmethod
def get_service_type():
return ['load-balancer']
@staticmethod
def get_codename():
return 'octavia'
def post_configuration(self, conf, is_service):
conf.set('load_balancer', 'member_role',
conf.get('auth', 'tempest_roles').split(',')[0])
|
from pygnmi.client import gNMIclient
from pprint import pprint as pp
import json
host = ('10.73.1.105', '6030')
username ='arista'
password ='arista'
with gNMIclient(target=host, username=username, password=password, insecure=True) as gc:
result = gc.capabilities()
pp(result)
|
def toys(w):
"""Hackerrank Problem: https://www.hackerrank.com/challenges/priyanka-and-toys/problem
    Priyanka works for an international toy company that ships by container. Her task is to determine the lowest
cost way to combine her orders for shipping. She has a list of item weights. The shipping company has a requirement
that all items loaded in a container must weigh less than or equal to 4 units plus the weight of the minimum weight
item. All items meeting that requirement will be shipped in one container.
Solve:
We sort the array, and then we iterate through the list seeing if each order fits within the current lowest order's
weight. If it does, we can continue on, and if it doesn't, we then create a new "container" as this order no
longer fits within the previous order limit, and continue on through the orders.
Args:
w (list): Array representing the weighted orders
Returns:
int: The minimum number of containers needed to ship the orders
"""
containers = 1
w.sort()
cur_lowest = w[0]
# Iterate through the sorted list, and add a container if the next weighted order doesn't fit within the current
# lowest order's weight + 4
for i in range(1, len(w)):
if w[i] > cur_lowest + 4:
cur_lowest = w[i]
containers += 1
return containers
if __name__ == "__main__":
print(toys([1, 2, 3, 21, 7, 12, 14, 21]))
|
# ================================================================
# MIT License
# Copyright (c) 2021 edwardyehuang (https://github.com/edwardyehuang)
# ================================================================
import tensorflow as tf
from iseg.metrics.seg_metric_wrapper import SegMetricWrapper
from iseg.metrics.mean_iou import MeanIOU
class SegMetricBuilder:
def __init__(self, num_class, ignore_label):
self.num_class = num_class
self.ignore_label = ignore_label
self.__metrics = []
    def add(self, prefix="", use_iou=True, pre_compute_fn=None):
        # Each call registers one group of metrics; a non-empty prefix is prepended to the metric name.
        metrics_list = []
if prefix is None:
prefix = ""
if prefix != "":
prefix = prefix + "_"
if use_iou:
iou_metric = SegMetricWrapper(
MeanIOU(self.num_class), num_class=self.num_class, ignore_label=self.ignore_label, name=prefix + "IOU"
)
iou_metric.add_pre_compute_fn(pre_compute_fn)
metrics_list.append(iou_metric)
self.__metrics.append(metrics_list)
@property
def metrics(self):
return self.__metrics
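# Minimal usage sketch (the class count and ignore label are made-up values):
# builder = SegMetricBuilder(num_class=21, ignore_label=255)
# builder.add(prefix="val")          # registers one MeanIOU-based metric group
# metric_groups = builder.metrics    # -> [[SegMetricWrapper(...)]]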
|
from __future__ import annotations
import inspect
from enum import Enum
from typing import Any, Optional, Tuple
class Location:
file: str
line_no: int
source_line: str
def __init__(self, file: str, line_no: int, source_line: str):
self.file = file
self.line_no = line_no
self.source_line = source_line
    def __str__(self):
        buf = f"{self.file}:{self.line_no}"
        if self.source_line:
            for i, line in enumerate(self.source_line.splitlines()):
                if i < 5:
                    buf += f"\n{self.line_no + i:3} | {line}"
                else:
                    # Truncate long source snippets after five lines.
                    buf += "\n    | ..."
                    break
        return buf
    def __repr__(self):
        return f"Location(file={self.file!r}, line_no={self.line_no!r}, source_line={self.source_line!r})"
def __eq__(self, other):
if not isinstance(other, Location):
return False
return self.file == other.file and self.line_no == other.line_no
@staticmethod
def from_code(obj) -> Location:
try:
return Location(
file=inspect.getfile(obj),
line_no=inspect.getsourcelines(obj)[1],
source_line="".join(inspect.getsourcelines(obj)[0]),
)
except Exception:
return Location(
file=inspect.getfile(obj),
line_no=1,
source_line=repr(obj)
)
@staticmethod
def from_stack(stack) -> Location:
if isinstance(stack, inspect.FrameInfo):
try:
return Location(
file=stack.filename,
line_no=stack.lineno,
source_line=stack.code_context[0]
)
except Exception:
return Location(
file=stack.filename,
line_no=stack.lineno,
source_line=None
)
else: # assume sys._getframe(...)
try:
source_line = inspect.findsource(stack.f_code)[0][stack.f_lineno - 1]
return Location(
file=stack.f_code.co_filename,
line_no=stack.f_lineno,
source_line=source_line
)
except Exception:
return Location(
file=stack.f_code.co_filename,
line_no=stack.f_lineno,
source_line=None
)
class Frame:
type_declared: str
indicator_line: str
declared: Optional[Location]
responsable: Optional[Location]
responsibility_type: Optional[ResponsibilityType]
    def __init__(self, type_declared: str, indicator_line: Optional[str],
                 declared: Optional[Location], responsable: Optional[Location]):
        self.type_declared = type_declared
        if indicator_line is None:
            indicator_line = '^' * len(type_declared)
        self.indicator_line = indicator_line
        self.declared = declared
        self.responsable = responsable
        # Assigned later (e.g. by UntypyTypeError); default to None so attribute access is safe.
        self.responsibility_type = None
def __str__(self):
buf = f"in: {self.type_declared}\n" \
f" {self.indicator_line}\n"
if self.responsable is not None:
buf += f"{self.responsable.file}:{self.responsable.line_no}:\n" \
f"{self.responsable.source_line}\n" \
f"\n"
return buf
class ResponsibilityType(Enum):
IN = 0
OUT = 1
def invert(self):
if self is ResponsibilityType.IN:
return ResponsibilityType.OUT
else:
return ResponsibilityType.IN
def join_lines(l: list[str]) -> str:
return '\n'.join([x.rstrip() for x in l])
class UntypyTypeError(TypeError):
given: Any
expected: str
frames: list[Frame]
notes: list[str]
previous_chain: Optional[UntypyTypeError]
responsibility_type: ResponsibilityType
def __init__(self, given: Any, expected: str, frames: list[Frame] = [],
notes: list[str] = [],
previous_chain: Optional[UntypyTypeError] = None,
responsibility_type: ResponsibilityType = ResponsibilityType.IN):
self.responsibility_type = responsibility_type
self.given = given
self.expected = expected
self.frames = frames.copy()
for frame in self.frames:
if frame.responsibility_type is None:
frame.responsibility_type = responsibility_type
self.notes = notes.copy()
self.previous_chain = previous_chain
super().__init__('\n' + self.__str__())
def next_type_and_indicator(self) -> Tuple[str, str]:
if len(self.frames) >= 1:
frame = self.frames[-1]
return frame.type_declared, frame.indicator_line
else:
return self.expected, "^" * len(self.expected)
def with_frame(self, frame: Frame) -> UntypyTypeError:
frame.responsibility_type = self.responsibility_type
return UntypyTypeError(self.given, self.expected, self.frames + [frame],
self.notes, self.previous_chain, self.responsibility_type)
def with_previous_chain(self, previous_chain: UntypyTypeError):
return UntypyTypeError(self.given, self.expected, self.frames,
self.notes, previous_chain, self.responsibility_type)
def with_note(self, note: str):
return UntypyTypeError(self.given, self.expected, self.frames,
self.notes + [note], self.previous_chain, self.responsibility_type)
def with_inverted_responsibility_type(self):
return UntypyTypeError(self.given, self.expected, self.frames,
self.notes, self.previous_chain, self.responsibility_type.invert())
def last_responsable(self):
for f in reversed(self.frames):
if f.responsable is not None and f.responsibility_type is ResponsibilityType.IN:
return f.responsable
return None
def last_declared(self):
for f in reversed(self.frames):
if f.declared is not None:
return f.declared
return None
def __str__(self):
declared_locs = []
responsable_locs = []
for f in self.frames:
if f.responsable is not None and f.responsibility_type is ResponsibilityType.IN and str(
f.responsable) not in responsable_locs:
responsable_locs.append(str(f.responsable))
if f.declared is not None and str(f.declared) not in declared_locs:
declared_locs.append(str(f.declared))
cause = join_lines(responsable_locs)
declared = join_lines(declared_locs)
(ty, ind) = self.next_type_and_indicator()
notes = join_lines(self.notes)
if notes:
notes = notes + "\n\n"
if self.previous_chain is None:
previous_chain = ""
else:
previous_chain = self.previous_chain.__str__()
if previous_chain:
previous_chain = previous_chain.strip() + "\n\n"
ctx = ""
if self.expected != ty:
ctx = f"context: {ty.rstrip()}\n" \
f" {ind.rstrip()}"
given = repr(self.given)
expected = self.expected.strip()
if expected != 'None':
expected = f'value of type {expected}'
return (f"""
{previous_chain}{notes}given: {given.rstrip()}
expected: {expected}
{ctx}
declared at: {declared}
caused by: {cause}""")
class UntypyAttributeError(AttributeError):
def __init__(self, message: str, locations: list[Location] = []):
self.message = message
self.locations = locations.copy()
super().__init__(self.__str__())
def with_location(self, loc: Location) -> UntypyAttributeError:
return UntypyAttributeError(self.message, self.locations + [loc])
def __str__(self):
locations = '\n'.join(map(str, self.locations))
return f"{self.message}\n{locations}"
|
# Generated by Django 2.0.4 on 2018-04-11 12:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('poem', '0006_remove_user_favorate_poem'),
]
operations = [
migrations.RenameField(
model_name='user',
old_name='favorate_peotry',
new_name='favourate_peotry',
),
migrations.AddField(
model_name='user',
name='favourate_author',
field=models.ManyToManyField(to='poem.Author'),
),
]
|
import os.path
import subprocess
def pre_build(**kwargs):
    """Install JS dependencies (once) and build the docco/API docs before the main build."""
    if not os.path.exists('node_modules'):
subprocess.run(['npm', 'install', '--ignore-scripts'], check=True)
subprocess.run(['npm', 'run', 'docs:docco'], check=True)
subprocess.run(['npm', 'run', 'docs:api'], check=True)
|
class corsMiddleware(object):
    # Old-style response hook: add a permissive CORS header to every outgoing response.
    def process_response(self, req, resp):
resp["Access-Control-Allow-Origin"] = "*"
return resp
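# If this targets Django's pre-1.10 middleware API (an assumption), a new-style
# equivalent would be a callable wrapper; sketch only:
# class CorsMiddleware:
#     def __init__(self, get_response):
#         self.get_response = get_response
#     def __call__(self, request):
#         response = self.get_response(request)
#         response["Access-Control-Allow-Origin"] = "*"
#         return response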
|
#!/usr/bin/env python
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
def main():
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
import os
import sys
command = sys.argv[0]
command_dir = os.path.dirname(command)[:-3]
sys.path.append(command_dir)
from docutils.core import publish_cmdline, default_description
import html5css3
description = ('Generates html5 documents from standalone reStructuredText '
'sources. ' + default_description)
    publish_cmdline(writer_name='html5', writer=html5css3.Writer(),
                    description=description)
if __name__ == '__main__':
    main()
|
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import align.detect_face
import imageio
import time
with tf.Graph().as_default():
    config = tf.ConfigProto(device_count={"CPU": 4},  # limit to num_cpu_core CPU usage
                            inter_op_parallelism_threads=6,
                            intra_op_parallelism_threads=4,
                            use_per_session_threads=2,
                            log_device_placement=True)
    sess = tf.Session(config=config)
with sess.as_default():
with tf.variable_scope('pnet'):
data = tf.placeholder(tf.float32, (None,None,None,3), 'input')
pnet = align.detect_face.PNet({'data':data})
pnet.load('./align/det1.npy', sess)
with tf.variable_scope('rnet'):
data = tf.placeholder(tf.float32, (None,24,24,3), 'input')
rnet = align.detect_face.RNet({'data':data})
rnet.load('./align/det2.npy', sess)
with tf.variable_scope('onet'):
data = tf.placeholder(tf.float32, (None,48,48,3), 'input')
onet = align.detect_face.ONet({'data':data})
onet.load('./align/det3.npy', sess)
pnet_fun = lambda img : sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0':img})
rnet_fun = lambda img : sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0':img})
onet_fun = lambda img : sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0':img})
minsize = 20 # minimum size of face
threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold
factor = 0.709 # scale factor
source_path = "/home/luoyuhao/Datasets/CommanTest/1.png"
print("start...")
detect_time_start = time.time();
img = imageio.imread(source_path)
bounding_boxes, points = align.detect_face.detect_face(img, minsize, pnet_fun, rnet_fun, onet_fun, threshold, factor)
print("detect face time:",time.time()-detect_time_start)
#print('Bounding box:: %s' % bounding_boxes)
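        # Illustrative follow-up (assumes each bounding_boxes row is
        # [x1, y1, x2, y2, score], as returned by this MTCNN implementation):
        # for x1, y1, x2, y2, score in bounding_boxes:
        #     print("face (%d, %d)-(%d, %d) score %.3f" % (x1, y1, x2, y2, score))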
|
# -*- coding: utf-8 -*-
"""General utility classes and functions
Contains several classes and functions for quality of life. Used indiscriminately throughout the module.
Notes
-----
Functions should attempt to not contain stateful information, as this module will be called by other modules
throughout the program, including other Threads, and as such states may not be coherent.
"""
import os
import sys
import time
from functools import lru_cache
from math import log, floor
from typing import Union, Tuple
import mss
class Color:
""" Container class for a single color vector in HSBK color-space. """
__slots__ = ["hue", "saturation", "brightness", "kelvin"]
def __init__(self, hue: int, saturation: int, brightness: int, kelvin: int):
self.hue = hue
self.saturation = saturation
self.brightness = brightness
self.kelvin = kelvin
def __getitem__(self, item) -> int:
return self.__getattribute__(self.__slots__[item])
def __len__(self) -> int:
return 4
def __setitem__(self, key, value):
self.__setattr__(self.__slots__[key], value)
def __str__(self) -> str:
return "[{}, {}, {}, {}]".format(
self.hue, self.saturation, self.brightness, self.kelvin
)
def __repr__(self) -> str:
return [self.hue, self.saturation, self.brightness, self.kelvin].__repr__()
def __eq__(self, other) -> bool:
return (
self.hue == other.hue
and self.brightness == other.brightness
and self.saturation == other.saturation
and self.kelvin == other.kelvin
)
def __add__(self, other):
return Color(
self.hue + other[0],
self.saturation + other[1],
self.brightness + other[2],
self.kelvin + other[3],
)
def __sub__(self, other):
return self.__add__([-v for v in other])
# Derived types
TypeRGB = Union[Tuple[int, int, int], Color]
TypeHSBK = Union[Tuple[int, int, int, int], Color]
def HSBKtoRGB(hsvk: TypeHSBK) -> TypeRGB:
""" Convert Tuple in HSBK color-space to RGB space.
Converted from PHP https://gist.github.com/joshrp/5200913 """
# pylint: disable=invalid-name
iH, iS, iV, iK = hsvk
dS = (100 * iS / 65535) / 100.0 # Saturation: 0.0-1.0
dV = (100 * iV / 65535) / 100.0 # Lightness: 0.0-1.0
dC = dV * dS # Chroma: 0.0-1.0
dH = (360 * iH / 65535) / 60.0 # H-prime: 0.0-6.0
dT = dH # Temp variable
while dT >= 2.0: # php modulus does not work with float
dT -= 2.0
dX = dC * (1 - abs(dT - 1))
dHf = floor(dH)
if dHf == 0:
dR = dC
dG = dX
dB = 0.0
elif dHf == 1:
dR = dX
dG = dC
dB = 0.0
elif dHf == 2:
dR = 0.0
dG = dC
dB = dX
elif dHf == 3:
dR = 0.0
dG = dX
dB = dC
elif dHf == 4:
dR = dX
dG = 0.0
dB = dC
elif dHf == 5:
dR = dC
dG = 0.0
dB = dX
else:
dR = 0.0
dG = 0.0
dB = 0.0
dM = dV - dC
dR += dM
dG += dM
dB += dM
# Finally factor in Kelvin
# Adopted from:
# https://github.com/tort32/LightServer/blob/master/src/main/java/com/github/tort32/api/nodemcu/protocol/RawColor.java#L125
rgb_hsb = int(dR * 255), int(dG * 255), int(dB * 255)
rgb_k = kelvinToRGB(iK)
a = iS / 65535.0
b = (1.0 - a) / 255
x = int(rgb_hsb[0] * (a + rgb_k[0] * b))
y = int(rgb_hsb[1] * (a + rgb_k[1] * b))
z = int(rgb_hsb[2] * (a + rgb_k[2] * b))
return x, y, z
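# Quick sanity-check sketch (input values chosen arbitrarily): a fully saturated hue of 0
# at 3500 K should come out close to pure red.
# print(HSBKtoRGB((0, 65535, 65535, 3500)))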
def hueToRGB(h: float, s: float = 1, v: float = 1) -> TypeRGB:
""" Convert a Hue-angle to an RGB value for display. """
# pylint: disable=invalid-name
h = float(h)
s = float(s)
v = float(v)
h60 = h / 60.0
h60f = floor(h60)
hi = int(h60f) % 6
f = h60 - h60f
p = v * (1 - s)
q = v * (1 - f * s)
t = v * (1 - (1 - f) * s)
r, g, b = 0, 0, 0
if hi == 0:
r, g, b = v, t, p
elif hi == 1:
r, g, b = q, v, p
elif hi == 2:
r, g, b = p, v, t
elif hi == 3:
r, g, b = p, q, v
elif hi == 4:
r, g, b = t, p, v
elif hi == 5:
r, g, b = v, p, q
r, g, b = int(r * 255), int(g * 255), int(b * 255)
return r, g, b
def kelvinToRGB(temperature: int) -> TypeRGB:
""" Convert a Kelvin (K) color-temperature to an RGB value for display."""
# pylint: disable=invalid-name
temperature /= 100
# calc red
if temperature < 66:
red = 255
else:
red = temperature - 60
red = 329.698727446 * (red ** -0.1332047592)
if red < 0:
red = 0
if red > 255:
red = 255
# calc green
if temperature < 66:
green = temperature
green = 99.4708025861 * log(green) - 161.1195681661
if green < 0:
green = 0
if green > 255:
green = 255
else:
green = temperature - 60
green = 288.1221695283 * (green ** -0.0755148492)
if green < 0:
green = 0
if green > 255:
green = 255
# calc blue
if temperature >= 66:
blue = 255
else:
if temperature < 19:
blue = 0
else:
blue = temperature - 10
blue = 138.5177312231 * log(blue) - 305.0447927307
if blue < 0:
blue = 0
if blue > 255:
blue = 255
return int(red), int(green), int(blue)
def tuple2hex(tuple_: TypeRGB) -> str:
    """ Takes a color in tuple form and converts it to hex. """
return "#%02x%02x%02x" % tuple_
def str2list(string: str, type_func) -> list:
""" Takes a Python list-formatted string and returns a list of elements of type type_func """
return list(map(type_func, string.strip("()[]").split(",")))
def str2tuple(string: str, type_func) -> tuple:
""" Takes a Python list-formatted string and returns a tuple of type type_func """
return tuple(map(type_func, string.strip("()[]").split(",")))
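# For example: str2tuple("[1, 2, 3]", int) == (1, 2, 3)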
# Multi monitor methods
@lru_cache(maxsize=None)
def get_primary_monitor() -> Tuple[int, int, int, int]:
""" Return the system's default primary monitor rectangle bounds. """
    # The primary monitor has its top-left corner at (0, 0).
    return [rect for rect in getDisplayRects() if rect[:2] == (0, 0)][0]
def resource_path(relative_path) -> str:
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS # pylint: disable=protected-access,no-member
except Exception: # pylint: disable=broad-except
base_path = os.path.abspath("../")
return os.path.join(base_path, relative_path)
# Misc
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
if "log_time" in kw:
name = kw.get("log_name", method.__name__.upper())
kw["log_time"][name] = int((te - ts) * 1000)
else:
print("%r %2.2f ms" % (method.__name__, (te - ts) * 1000))
return result
return timed
def getDisplayRects():
with mss.mss() as sct:
return [tuple(m.values()) for m in sct.monitors]
|
# -*- coding: utf-8 -*-
prices = {1: 4, 2: 4.5, 3: 5, 4: 2, 5: 1.5}
product, quantity = [int(x) for x in input().split()]
print('Total: R$ %.2f' % (prices[product] * quantity))
|
import mysql.connector
def create_connection():
    return mysql.connector.connect(
        host="localhost",
        user="root",
        passwd="root",
        database="facebook-explorer",
        port=8889
    )
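# Usage sketch (assumes the local MySQL instance configured above is reachable):
# conn = create_connection()
# cursor = conn.cursor()
# cursor.execute("SELECT DATABASE()")
# print(cursor.fetchone())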
|
import os
configurations = [
"Wall",
"Werror",
"Wextra",
"Wassign-enum",
"Wblock-capture-autoreleasing",
"Wbool-conversion",
"Wcomma",
"Wconditional-uninitialized",
"Wconstant-conversion",
"Wdeprecated-declarations",
"Wdeprecated-implementations",
"Wdeprecated-objc-isa-usage",
"Wduplicate-method-match",
"Wdocumentation",
"Wempty-body",
"Wenum-conversion",
"Wfatal-errors",
"Wfloat-conversion",
"Wheader-hygiene",
"Wincompatible-pointer-types",
"Wint-conversion",
"Winvalid-offsetof",
"Wnewline-eof",
"Wno-unknown-pragmas",
"Wnon-literal-null-conversion",
"Wnon-modular-include-in-framework-module",
"Wnon-virtual-dtor",
"Wobjc-literal-conversion",
"Wobjc-root-class",
"Wprotocol",
"Wshorten-64-to-32",
"Wstrict-prototypes",
"Wundeclared-selector",
"Wunreachable-code",
"Wunused-parameter",
]
current_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
file_path = current_directory + "/IdentityCore/xcconfig/identitycore__common.xcconfig"
contents = []
with open(file_path, "r") as f:
contents = f.read().split("\n")
# Insert one OTHER_CFLAGS append line per warning flag next to the base OTHER_CFLAGS entry.
for i in range(len(contents)):
if contents[i] == "OTHER_CFLAGS=$(inherited) -fstack-protector-strong":
new_lines = []
for configuration in configurations:
new_lines.append("OTHER_CFLAGS=$(OTHER_CFLAGS) -" + configuration)
contents = contents[:i] + new_lines + contents[i:]
break
# write into file
with open(file_path, "w") as f:
f.write("\n".join(contents))
|