content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Name: csv_plot_scatter_matrix.py
# Description:
#
# Author: m.akei
# Copyright: (c) 2020 by m.na.akei
# Time-stamp: <2020-08-30 15:06:06>
# Licence:
# Copyright (c) 2021 Masaharu N. Akei
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
# ----------------------------------------------------------------------
import argparse
import textwrap
import sys
import re
import json
from pathlib import Path
import plotly.express as px
import pandas as pd
VERSION = 1.0
def init():
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(
        description="plot scatter matrix",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent('''
example:
  csv_plot_scatter_matrix.py --columns="ABC001","ABC002","ABC003" --category="ABC004" --output=test.png test_plot.csv
'''))
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s {}'.format(VERSION))
    parser.add_argument("--title", dest="TITLE", help="title of chart",
                        type=str, metavar='TEXT', default="")
    # Required: which columns of the CSV become the scatter-matrix axes.
    parser.add_argument("--columns", dest="COLUMNS",
                        help="list of names of columns with csv",
                        type=str, metavar='COLUMNS,COLUMNS[,COLUMNS...]',
                        required=True)
    parser.add_argument("--category", dest="SYMBOL",
                        help="name of column to make group",
                        type=str, metavar='COLUMN', default=None)
    parser.add_argument("--category_orders", dest="CATEG_ORDERS",
                        help="orders of elements in each category, with json format",
                        type=str, metavar='JSON_STRING', default=None)
    parser.add_argument("--output", dest="OUTPUT", help="path of output file",
                        type=str, metavar="FILE")
    parser.add_argument("--format", dest="FORMAT",
                        help="format of output, default=svg",
                        choices=["svg", "png", "jpg", "json", "html"],
                        default="svg")
    parser.add_argument("--packed_html", dest="PACKED_HTML",
                        help="whether plotly.js is included in result html file, this is enable only for --format=html",
                        action="store_true")
    parser.add_argument("--width", dest="WIDTH", help="width of output",
                        type=int, metavar='WIDTH', default=None)
    parser.add_argument("--height", dest="HEIGHT", help="height of output",
                        type=int, metavar='HEIGHT', default=None)
    parser.add_argument('csv_file', metavar='CSV_FILE',
                        help='csv files to read', nargs=1)
    return parser.parse_args()
if __name__ == "__main__":
    # Above this many distinct category values, coloring is disabled (only
    # marker symbols are used) to keep the legend readable.
    color_limit = 10
    args = init()
    csv_file = args.csv_file[0]
    output_format = args.FORMAT
    width = args.WIDTH
    height = args.HEIGHT
    output_file = args.OUTPUT
    packed_html = args.PACKED_HTML
    # Derive the output path/format: default name from the input file, or
    # stdout when reading from stdin; an explicit --output overrides --format
    # with its own file extension.
    if output_file is None:
        if csv_file != "-":
            output_file = Path(csv_file).stem + "_scatter_matrix." + output_format
        else:
            output_file = sys.stdout.buffer
    else:
        output_format = Path(output_file).suffix[1:]
    if csv_file == "-":
        csv_file = sys.stdin
    title = args.TITLE
    # --columns is comma-separated; tolerate whitespace around the commas.
    dimensions = re.split(r"\s*,\s*", args.COLUMNS)
    symbol = args.SYMBOL
    # Built-in Continuous Color Scales | Python | Plotly https://plotly.com/python/builtin-colorscales/#builtin-sequential-color-scales
    # NOTE(review): color_scale is selected here but never passed to the
    # figure below.
    color_scales = ["Inferno", "Viridis", "OrRd", "YlOrBr", "BuGn"]
    color_scale = color_scales[4]
    categ_orders_s = args.CATEG_ORDERS
    categ_orders = {}
    # --category_orders is a JSON object; abort early with a message when it
    # does not parse.
    if categ_orders_s is not None:
        try:
            categ_orders = json.loads(categ_orders_s)
        except json.decoder.JSONDecodeError as e:
            print("??Error: '--category_orders' has invalid format: {}".format(e), file=sys.stderr)
            print(categ_orders_s)
            sys.exit(1)
    #--- processing
    fig_params = {"dimensions": dimensions, "opacity": 1.0}
    # 2D Histograms | Python | Plotly https://plotly.com/python/2D-Histogram/
    csv_df = pd.read_csv(csv_file)
    if symbol is not None:
        # Treat the grouping column as strings so plotly maps it to discrete
        # symbols/colors rather than a continuous scale.
        csv_df[symbol] = csv_df[symbol].astype(str, errors="ignore")
        if len(list(csv_df[symbol].value_counts().items())) > color_limit:
            fig_params.update({"symbol": symbol})
        else:
            fig_params.update({"symbol": symbol, "color": symbol})
    if title is not None and len(title) > 0:
        fig_params["title"] = title
    if width is not None:
        fig_params["width"] = int(width)
    if height is not None:
        fig_params["height"] = int(height)
    # For string-typed axis columns, fix a sorted ordering of their values so
    # the axes are deterministic; explicit --category_orders wins on conflict.
    figco = {}
    for col in dimensions:
        if csv_df[col].dtype in [str, object]:
            figco[col] = sorted(csv_df[col].value_counts().keys())
    if len(figco) > 0:
        fig_params["category_orders"] = figco
    if len(categ_orders) > 0:
        if "category_orders" in fig_params:
            fig_params["category_orders"].update(categ_orders)
        else:
            fig_params["category_orders"] = categ_orders
    print("""
==== plot chart from csv
input : {}
output : {}
""".format(csv_file, output_file), file=sys.stderr)
    print("parameters: {}".format(fig_params), file=sys.stderr)
    # Scatterplot Matrix | Python | Plotly https://plotly.com/python/splom/
    # plotly.express.scatter_matrix 4.9.0 documentation https://plotly.github.io/plotly.py-docs/generated/plotly.express.scatter_matrix.html
    fig = px.scatter_matrix(csv_df, **fig_params)
    fig.update_layout(coloraxis_colorbar=dict(yanchor="top", y=1, x=0, ticks="outside"))
    # Hide the redundant diagonal (a column plotted against itself).
    fig.update_traces(diagonal_visible=False)
    # Writers take either a path or a text stream; swap the binary stdout
    # buffer for the text one where a text stream is expected.
    if output_format == "json":
        if output_file == sys.stdout.buffer:
            output_file = sys.stdout
        fig.write_json(output_file)
    elif output_format == "html":
        if output_file == sys.stdout.buffer:
            output_file = sys.stdout
        if packed_html:
            fig.write_html(output_file, include_plotlyjs=True, full_html=True)
        else:
            fig.write_html(output_file, include_plotlyjs='directory', full_html=True)
    else:
        fig.write_image(output_file, format=output_format)
|
# Host-side stubs matching the MicroPython ``machine`` module API
# (presumably so hardware-targeting code can be imported on a desktop --
# TODO confirm intended use).
def disable_irq():
    # No-op stub; on hardware this disables interrupt requests.
    pass
def enable_irq():
    # No-op stub; on hardware this re-enables interrupt requests.
    pass
def freq():
    # Stub; on hardware this reads/sets the CPU frequency.
    pass
# Raw-memory access proxies on real hardware; None here, so any use on the
# host fails fast instead of silently poking memory.
mem16 = None
mem32 = None
mem8 = None
def reset():
    # No-op stub; on hardware this performs a hard reset.
    pass
def time_pulse_us():
    # Stub; on hardware this times a pin pulse in microseconds.
    pass
def unique_id():
    # Stub; on hardware this returns the board's unique ID bytes.
    pass
|
from django.conf.urls import url
from django.views import generic
from django.test.utils import override_settings
from django_webtest import WebTest
from .. import forms
@override_settings(ROOT_URLCONF=__name__)
class Test(WebTest):
    """Functional tests for the demo registration form (served by the
    urlpatterns defined at the bottom of this module)."""

    def test_default_usecase(self):
        """A fully and validly filled form submits with a redirect."""
        page = self.app.get('/demo/registration/')
        form = page.form
        form['username'] = 'admin'
        form['email'] = 'admin@admin.com'
        form['password'] = 'admin'
        form['password_confirm'] = 'admin'
        form['first_name'] = 'Super'
        form['last_name'] = 'Admin'
        form['gender'] = 'M'
        form['receive_news'] = 1
        form['agree_toc'] = 1
        response = form.submit()
        # assertEquals is a deprecated alias; assertEqual is the real method.
        self.assertEqual(302, response.status_code)

    def test_post_invalid_data(self):
        """Submitting only a malformed email reports per-field errors."""
        page = self.app.get('/demo/registration/')
        form = page.form
        form['email'] = 'admin'
        response = form.submit()
        self.assertEqual('This field is required.', response.pyquery('#id_username_container .errors').text())
        self.assertEqual('Enter a valid email address.', response.pyquery('#id_email_container .errors').text())
        self.assertEqual('This field is required.', response.pyquery('#id_password_container .errors').text())
        self.assertEqual('This field is required.', response.pyquery('#id_password_confirm_container .errors').text())
# URLconf used only by this test module (ROOT_URLCONF is pointed here).
# NOTE(review): django.conf.urls.url was removed in Django 4.0;
# django.urls.re_path is the drop-in replacement when upgrading.
urlpatterns = [
    url(r'^demo/registration/$', generic.FormView.as_view(
        form_class=forms.RegistrationForm, success_url='/demo/registration/', template_name="demo.html")),
]
|
# Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Your very first example with Cornac"""
import cornac
from cornac.eval_methods import RatioSplit
from cornac.models import MF, PMF, BPR
from cornac.metrics import MAE, RMSE, Precision, Recall, NDCG, AUC, MAP
# load the built-in MovieLens 100K and split the data based on ratio
ml_100k = cornac.datasets.movielens.load_feedback()
# 80/20 split with a fixed seed for reproducibility; rating_threshold
# presumably marks ratings >= 4.0 as relevant for the ranking metrics --
# see Cornac's RatioSplit docs to confirm.
rs = RatioSplit(data=ml_100k, test_size=0.2, rating_threshold=4.0, seed=123)
# initialize models, here we are comparing: Biased MF, PMF, and BPR
models = [
    MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True, seed=123),
    PMF(k=10, max_iter=100, learning_rate=0.001, lambda_reg=0.001, seed=123),
    BPR(k=10, max_iter=200, learning_rate=0.001, lambda_reg=0.01, seed=123),
]
# define metrics to evaluate the models
metrics = [MAE(), RMSE(), Precision(k=10), Recall(k=10), NDCG(k=10), AUC(), MAP()]
# put it together in an experiment, voilà!
cornac.Experiment(eval_method=rs, models=models, metrics=metrics, user_based=True).run()
|
import asyncio
import pytest
@pytest.mark.asyncio
async def test_dependent_fixture(dependent_fixture):
    """Test a dependent fixture.

    Rewritten as a native coroutine: ``@asyncio.coroutine`` with
    ``yield from`` was deprecated in Python 3.8 and removed in 3.11.
    """
    await asyncio.sleep(0.1)
|
#!/usr/bin/env python3
import sys
import gitlab
import yaml
# Synchronize project labels from labels.yaml into the GitLab project:
# existing labels get their color/description updated; missing ones are
# only reported (creation is intentionally disabled below).
gl = gitlab.Gitlab.from_config('haskell')
proj = gl.projects.get(1)
existing = proj.labels.list(all=True)
existing = { label.name: label for label in existing }
# safe_load instead of yaml.load: plain yaml.load without a Loader is
# deprecated and can execute arbitrary constructors from untrusted input;
# `with` guarantees the file is closed.
with open('labels.yaml') as f:
    wanted_labels = yaml.safe_load(f)
for label in wanted_labels:
    print(label)
    if label['name'] in existing:
        l = existing[label['name']]
        l.color = label['color']
        l.description = label['description']
        l.save()
    else:
        #proj.labels.create(label)
        print("doesn't exist")
|
import os
import sys
import socket
import struct
import SocketServer
import threadpool
# Blacklist of poisoned answers: keys are packed IPv4 addresses
# (socket.inet_aton bytes), values unused -- the dict is used as a set.
# Populated from iplist.txt in the __main__ block below.
FAKE_IPLIST = {}
# dns server config
TIMEOUT = 2  # socket timeout in seconds for upstream queries
TRY_TIMES = 5  # how many responses to read while looking for a clean answer
DNS_SERVER = '8.8.8.8'  # upstream (remote) dns server
# currently not used
# currently not used
def bytetodomain(s):
    """Decode a DNS length-prefixed domain name starting at s[0].

    Python 2 only: relies on ``str`` being a byte string so that slices of
    *s* can be concatenated onto ``domain``. Returns a tuple of
    (dotted domain name, number of bytes consumed including the 0 terminator).
    """
    domain = ''
    i = 0
    # each label is <length byte><length bytes of text>; 0 terminates
    length = struct.unpack('!B', s[0:1])[0]
    while length != 0:
        i += 1
        domain += s[i:i + length]
        i += length
        length = struct.unpack('!B', s[i:i+1])[0]
        if length != 0:
            domain += '.'
    return (domain, i + 1)
def skip_query(query):
    """Return the size in bytes of the QNAME field at the start of *query*.

    Walks the length-prefixed labels until the zero terminator is reached;
    the terminator byte itself is included in the returned size.
    """
    offset = 0
    label_len = struct.unpack('!B', query[0:1])[0]
    while label_len != 0:
        offset = offset + label_len + 1
        label_len = struct.unpack('!B', query[offset:offset + 1])[0]
    return offset + 1
def is_valid_pkt(response):
    """Return False when the single A-record answer resolves to a
    blacklisted (fake) IP from FAKE_IPLIST; True otherwise.

    Any response that is malformed, is not a plain successful answer, or
    raises during parsing is treated as "valid" (True) and passed through.
    """
    try:
        (flag, qdcount, ancount) = struct.unpack('!HHH', response[2:8])
        # only inspect standard "no error" responses
        if flag != 0x8180 and flag != 0x8580:
            return True
        if 1 != qdcount or 1 != ancount:
            return True
        dlen = skip_query(response[12:])
        pos = 12 + dlen
        (qtype, qclass) = struct.unpack('!HH', response[pos:pos+4])
        # qtype is 1 (mean query HOST ADDRESS), qclass is 1 (mean INTERNET)
        if 1 != qtype or 1 != qclass:
            return True
        pos = pos + 4  # position for response
        # a 0xc0-prefixed name is a compression pointer (2 bytes + 10 of
        # fixed answer header); otherwise the full name is repeated
        if ord(response[pos:pos+1]) & 0xc0:
            pos = pos + 12
        else:
            pos = pos + dlen + 10
        if response[pos:pos+4] in FAKE_IPLIST:
            print('Match: ' + socket.inet_ntoa(response[pos:pos+4]))
            return False
    # BUG FIX: "except Exception, e" is Python-2-only syntax (a SyntaxError
    # on Python 3); "as e" works on Python 2.6+ and Python 3.
    except Exception as e:
        print(e)
    return True
class ThreadPoolMixIn:
    """Mix-in that runs each request on a shared thread pool (``self.tp``)
    instead of spawning one thread per request.  The concrete server class
    must create ``self.tp`` before requests arrive (see DNSFilter)."""
    def process_request_thread(self, request, client_address):
        # Executed inside a pool thread; mirrors
        # SocketServer.ThreadingMixIn.process_request_thread.
        try:
            self.finish_request(request, client_address)
            self.shutdown_request(request)
        except:
            # NOTE(review): bare except also swallows SystemExit and
            # KeyboardInterrupt; kept as-is to preserve behavior.
            self.handle_error(request, client_address)
            self.shutdown_request(request)
    def process_request(self, request, client_address):
        # Queue the work item; the pool will call process_request_thread.
        self.tp.add_task(self.process_request_thread, request, client_address)
    def serve_forever(self, poll_interval=0.5):
        # Delegate to the UDPServer loop, but always stop the pool on exit.
        try:
            SocketServer.UDPServer.serve_forever(self, poll_interval)
        finally:
            self.tp.stop()
class DNSFilter(ThreadPoolMixIn, SocketServer.UDPServer):
    """UDP DNS proxy server that drops poisoned answers, handling requests
    on a fixed-size thread pool."""
    # much faster rebinding
    allow_reuse_address = True
    def __init__(self, s, t):
        # create the pool before the base class starts accepting requests
        self.tp = threadpool.ThreadPool(20)
        SocketServer.UDPServer.__init__(self, s, t)
class ThreadedUDPRequestHandler(SocketServer.BaseRequestHandler):
    """Handles one UDP DNS request: forwards the query upstream and relays
    back the first response that does not match the fake-IP blacklist."""
    def handle(self):
        query_data = self.request[0]
        udp_sock = self.request[1]
        addr = self.client_address
        response = self.dns_query(DNS_SERVER, 53, query_data)
        if response:
            # udp dns packet no length
            udp_sock.sendto(response, addr)
    def dns_query(self, dns_ip, dns_port, query_data):
        """Send *query_data* to the upstream server and return the first
        non-poisoned response, or None on timeout/error."""
        # BUG FIX: pre-bind so the finally clause (and the final return)
        # cannot raise NameError when socket() or the first recv fails.
        s = None
        data = None
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.settimeout(TIMEOUT)  # give up after TIMEOUT seconds
            s.sendto(query_data, (dns_ip, dns_port))
            # read up to TRY_TIMES responses, keeping the first clean one
            # (range, not py2-only xrange -- works on Python 2 and 3)
            for i in range(TRY_TIMES):
                data, addr = s.recvfrom(1024)
                if is_valid_pkt(data):
                    return data
                else:
                    data = None
        except Exception:
            return None
        finally:
            if s: s.close()
        return data
if __name__ == '__main__':
    # Python 2 script (print statements below are py2-only syntax).
    print '---------------------------------------------------------------'
    print '| To Use this tool, you must set your dns server to 127.0.0.1 |'
    print '---------------------------------------------------------------'
    # load config file, iplist.txt from https://github.com/clowwindy/ChinaDNS
    with open('iplist.txt', 'rb') as f:
        while 1:
            ip = f.readline()
            if ip:
                # strip the trailing newline and key by the packed address
                FAKE_IPLIST[socket.inet_aton(ip[:-1])] = None
            else:
                break
    # listen on all interfaces, UDP port 53 (requires root privileges)
    dns_server = DNSFilter(('0.0.0.0', 53), ThreadedUDPRequestHandler)
    try:
        dns_server.serve_forever()
    except:
        # swallow everything (including Ctrl-C) so the script exits quietly
        pass
    finally:
        pass
|
import numpy as np
import torch
import cv2
from .deep.feature_extractor import Extractor
from .sort.nn_matching import NearestNeighborDistanceMetric
from .sort.preprocessing import non_max_suppression
from .sort.detection import Detection
from .sort.tracker import Tracker
__all__ = ['DeepSort']
class DeepSort(object):
    """DeepSORT multi-object tracker: appearance features (CNN by default)
    plus IoU/Kalman association via the underlying ``Tracker``."""

    def __init__(self, model_path, max_dist=0.2, min_confidence=0.3, nms_max_overlap=1.0, max_iou_distance=0.7, max_age=70, n_init=3, nn_budget=100, use_cuda=True):
        self.min_confidence = min_confidence
        self.nms_max_overlap = nms_max_overlap
        # CNN appearance-feature extractor loaded from *model_path*
        self.extractor = Extractor(model_path, use_cuda=use_cuda)
        max_cosine_distance = max_dist
        # BUG FIX: the parameter was previously clobbered by a hard-coded
        # ``nn_budget = 100``; honor the caller's value (default is still 100).
        metric = NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
        self.tracker = Tracker(metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)

    def update(self, bbox_xywh, confidences, ori_img):
        """Run one tracking step and return confirmed tracks as an
        (N, 5) int array of [x1, y1, x2, y2, track_id]."""
        self.height, self.width = ori_img.shape[:2]
        # generate detections
        features = self._get_features(bbox_xywh, ori_img)  # CNN
        # features = self._get_features_hog_paper(bbox_xywh, ori_img) # HOG paper
        # features = self._get_features_hog(bbox_xywh, ori_img) # HOG
        # features = self._get_features_sift(bbox_xywh, ori_img) # SIFT
        bbox_tlwh = self._xywh_to_tlwh(bbox_xywh)
        detections = [Detection(bbox_tlwh[i], conf, features[i]) for i, conf in enumerate(confidences) if conf > self.min_confidence]
        # run non-maximum suppression
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = non_max_suppression(boxes, self.nms_max_overlap, scores)
        detections = [detections[i] for i in indices]
        # update tracker
        self.tracker.predict()
        self.tracker.update(detections)
        # output bbox identities
        outputs = []
        for track in self.tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            box = track.to_tlwh()
            x1, y1, x2, y2 = self._tlwh_to_xyxy(box)
            track_id = track.track_id
            # BUG FIX: np.int is a removed alias (NumPy 1.24+); use int
            outputs.append(np.array([x1, y1, x2, y2, track_id], dtype=int))
        if len(outputs) > 0:
            outputs = np.stack(outputs, axis=0)
        return outputs

    @staticmethod
    def _xywh_to_tlwh(bbox_xywh):
        """Convert bbox from xc_yc_w_h to xtl_ytl_w_h.

        Thanks JieChen91@github.com for reporting this bug!
        Accepts an (N, 4) numpy array or torch tensor; returns the same type.
        """
        if isinstance(bbox_xywh, np.ndarray):
            bbox_tlwh = bbox_xywh.copy()
        elif isinstance(bbox_xywh, torch.Tensor):
            bbox_tlwh = bbox_xywh.clone()
        bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2.
        bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2.
        return bbox_tlwh

    def _xywh_to_xyxy(self, bbox_xywh):
        """Center-based (x, y, w, h) -> corner (x1, y1, x2, y2), clipped to
        the current frame size."""
        x, y, w, h = bbox_xywh
        x1 = max(int(x - w / 2), 0)
        x2 = min(int(x + w / 2), self.width - 1)
        y1 = max(int(y - h / 2), 0)
        y2 = min(int(y + h / 2), self.height - 1)
        # BUG FIX: previously returned (x1, x2, y1, y2) while every call site
        # unpacks (x1, y1, x2, y2), which produced garbage crops.
        return x1, y1, x2, y2

    def _tlwh_to_xyxy(self, bbox_tlwh):
        """Top-left (x, y, w, h) -> corner (x1, y1, x2, y2), clipped to the
        current frame size."""
        x, y, w, h = bbox_tlwh
        x1 = max(int(x), 0)
        x2 = min(int(x + w), self.width - 1)
        y1 = max(int(y), 0)
        y2 = min(int(y + h), self.height - 1)
        # BUG FIX: same ordering fix as _xywh_to_xyxy (callers expect
        # x1, y1, x2, y2).
        return x1, y1, x2, y2

    def _xyxy_to_tlwh(self, bbox_xyxy):
        """Corner (x1, y1, x2, y2) -> top-left (t, l, w, h)."""
        x1, y1, x2, y2 = bbox_xyxy
        t = x1
        l = y1
        w = int(x2 - x1)
        h = int(y2 - y1)
        return t, l, w, h

    def _get_features(self, bbox_xywh, ori_img):
        """Crop each box out of *ori_img* and embed the crops with the CNN."""
        im_crops = []
        for box in bbox_xywh:
            x1, y1, x2, y2 = self._xywh_to_xyxy(box)
            im = ori_img[y1:y2, x1:x2]
            im_crops.append(im)
        if im_crops:
            features = self.extractor(im_crops)
        else:
            features = np.array([])
        return features

    def _get_features_hog_paper(self, bbox_xywh, ori_img):
        """Alternative appearance features: dense HOG over a 64x128 crop
        (parameters chosen to yield a 256-d descriptor)."""
        features = np.zeros((len(bbox_xywh), 256))  # 512 256 128
        for i, box in enumerate(bbox_xywh):
            x1, y1, x2, y2 = self._xywh_to_xyxy(box)
            im = ori_img[y1:y2, x1:x2]
            if im.shape[0] > 0 and im.shape[1] > 0:
                image = cv2.resize(im, (64, 128))
                gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # Convert the original image to gray scale
                cell_size = (16, 16)  # 512: (8, 16) 256: (16, 16) 128: (16, 32)
                num_cells_per_block = (2, 2)
                block_size = (num_cells_per_block[0] * cell_size[0], num_cells_per_block[1] * cell_size[1])
                x_cells = gray_image.shape[1] // cell_size[0]
                y_cells = gray_image.shape[0] // cell_size[1]
                h_stride = 2
                v_stride = 2
                block_stride = (cell_size[0] * h_stride, cell_size[1] * v_stride)
                num_bins = 8
                win_size = (x_cells * cell_size[0], y_cells * cell_size[1])
                hog = cv2.HOGDescriptor(win_size, block_size, block_stride, cell_size, num_bins)
                feature = hog.compute(gray_image)
                features[i] = feature.reshape(256)  # 512 256 128
            else:
                # empty crop: leave/assign an empty feature row
                features[i] = np.array([])
        return features

    def _get_features_hog(self, bbox_xywh, ori_img):
        """Alternative appearance features: hand-rolled gradient-orientation
        histograms over four quadrants (Hellinger-normalized, 512-d)."""
        features = np.zeros((len(bbox_xywh), 512))  # 512 256 128
        for i, box in enumerate(bbox_xywh):
            x1, y1, x2, y2 = self._xywh_to_xyxy(box)
            im = ori_img[y1:y2, x1:x2]
            if im.shape[0] > 0 and im.shape[1] > 0:
                image = cv2.resize(im, (64, 128))
                gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # Convert the original image to gray scale
                gx = cv2.Sobel(gray_image, cv2.CV_32F, 1, 0)
                gy = cv2.Sobel(gray_image, cv2.CV_32F, 0, 1)
                height = gray_image.shape[0]
                width = gray_image.shape[1]
                split_index_h = int(height / 2)
                split_index_w = int(width / 2)
                mag, ang = cv2.cartToPolar(gx, gy)
                bin_n = 128  # 128:32 256:64 512:128
                bin = np.int32(bin_n * ang / (2 * np.pi))
                bin_cells = bin[:split_index_w, :split_index_h], bin[split_index_w:, :split_index_h], \
                            bin[:split_index_w, split_index_h:], bin[split_index_w:, split_index_h:]
                mag_cells = mag[:split_index_w, :split_index_h], mag[split_index_w:, :split_index_h], \
                            mag[:split_index_w, split_index_h:], mag[split_index_w:, split_index_h:]
                hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
                hist = np.hstack(hists)
                # transform to Hellinger kernel
                eps = 1e-7
                hist /= hist.sum() + eps
                hist = np.sqrt(hist)
                hist /= np.linalg.norm(hist) + eps
                feature = np.float32(hist)
                features[i] = feature.reshape(512)  # 512 256 128
            else:
                features[i] = np.array([])
        return features

    def _get_features_sift(self, bbox_xywh, ori_img):
        """Alternative appearance features: first 4 SIFT descriptors of the
        crop, flattened and truncated to 512 dims."""
        features = np.zeros((len(bbox_xywh), 512))  # 512 256 128
        for i, box in enumerate(bbox_xywh):
            x1, y1, x2, y2 = self._xywh_to_xyxy(box)
            im = ori_img[y1:y2, x1:x2]
            if im.shape[0] > 0 and im.shape[1] > 0:
                image = cv2.resize(im, (64, 128))
                gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # Convert the original image to gray scale
                sift = cv2.xfeatures2d_SIFT.create(4)
                if sift:
                    # NOTE(review): des can be None (no keypoints) or hold
                    # fewer than 4 descriptors; hstack/truncation then
                    # misbehaves -- TODO harden if this path is revived.
                    kp, des = sift.detectAndCompute(image, None)  # des=4*128
                    feature = np.hstack(des)
                    print(feature.shape)
                    features[i] = feature[:512]  # 512 256 128
                else:
                    continue
            else:
                features[i] = np.array([])
        return features
|
import wx
import cv2 as cv
from app.WebCam import WebCam
class OwlFrame(wx.Frame):
    """Always-on-top frame that repaints webcam frames roughly 30x/second."""
    def __init__(self):
        style = wx.STAY_ON_TOP | wx.RESIZE_BORDER | wx.CLOSE_BOX | wx.CAPTION
        super(OwlFrame, self).__init__(None, title="OwlFrame", style=style)
        self.pnl = wx.Panel(self)
        # Set 30 fps for video
        self.timer = wx.Timer(self)
        self.timer.Start(1000./30.)
        self.Bind(wx.EVT_TIMER, self.onUpdate, self.timer)
        self.is_scanning_for_source = False
        self.webcam = WebCam()
        if not self.webcam.has_webcam():
            print("No webcam found")
            self.is_scanning_for_source = self.webcam.scan_for_source()
        # size the window to the camera's native resolution
        height, width = self.webcam.size()
        self.SetSize(wx.Size(width, height))
        self.pnl.Bind(wx.EVT_ERASE_BACKGROUND, self.onEraseBackground)
        self.pnl.Bind(wx.EVT_PAINT, self.onPaint)
        # NOTE(review): handler is named onKeyDown but bound to EVT_KEY_UP.
        self.pnl.Bind(wx.EVT_KEY_UP, self.onKeyDown)
        self.Bind(wx.EVT_CLOSE, self.onClose)
    def onClose(self, event):
        # stop the repaint timer before tearing down the window
        self.timer.Stop()
        self.Destroy()
    def onUpdate(self, event):
        # timer tick: request a repaint (the drawing happens in onPaint)
        self.Refresh()
    def onPaint(self, event):
        if not self.is_scanning_for_source:
            frame_w, frame_h = self.pnl.GetSize()
            frame = self.webcam.get_image(frame_w, frame_h)
            h, w = frame.shape[:2]
            # assumes `frame` is an RGB byte buffer -- TODO confirm WebCam output
            image = wx.Bitmap.FromBuffer(w, h, frame)
            # Buffer the image
            dc = wx.BufferedPaintDC(self.pnl)
            dc.DrawBitmap(image, 0, 0)
    # Avoid flickering
    def onEraseBackground(self, event):
        return
    def onKeyDown(self, event):
        # space bar re-triggers a scan for a webcam source
        if event.GetKeyCode() == wx.WXK_SPACE:
            self.is_scanning_for_source = False
            self.is_scanning_for_source = self.webcam.scan_for_source()
|
from typing import Dict, List, Iterable, Tuple
import transition
# A transition is a flat mapping of field name -> value (all strings).
Transition = Dict[str, str]
# Machine-level parameters such as 'start' and 'empty_symbol'.
MachineParams = Dict[str, str]
def is_machine_param(pairs: MachineParams) -> bool:
    """Return True when any key of *pairs* is a machine-level parameter.

    Only 'start' and 'empty_symbol' are recognized as machine parameters.
    """
    compiler_params = ('start', 'empty_symbol')
    # generator instead of building an intermediate list for any()
    return any(key in compiler_params for key in pairs)
def get_pairs(line: List[str]) -> Dict[str, str]:
    """Collects key value pairs from a line.

    There may be several key value pairs in one line. These are separated
    by spaces; keys are lower-cased, values kept verbatim.
    Example:
        line = 'language python version 3.9 answer 42'
        becomes
        {
            'language': 'python',
            'version': '3.9',
            'answer': '42'
        }

    Raises IndexError when *line* has an odd number of tokens (a key
    without a value), matching the original behavior.
    """
    return {line[i].lower(): line[i + 1] for i in range(0, len(line), 2)}
def machine(lines: Iterable[str]) -> Tuple[List[Transition], MachineParams]:
    """Parses a machine into its transitions and parameters.

    Each line is whitespace-split into key/value pairs; a line is either a
    transition (accepted by transition.valid) or a set of machine
    parameters. Any other line aborts the program.
    """
    transitions: List[Dict[str, str]] = list()
    machine_params: Dict[str, str] = dict()
    for line_nr, line in enumerate(lines):
        split_line = line.split()
        pairs = get_pairs(split_line)
        if transition.valid(pairs):
            transitions.append(pairs)
        elif is_machine_param(pairs):
            # later lines overwrite earlier values for the same parameter
            for key, value in pairs.items():
                machine_params[key] = value
        else:
            # NOTE(review): exit() without a status terminates with code 0;
            # sys.exit(1) would signal the failure to callers.
            print(f'ERROR at line {line_nr}: Unknown parameter')
            exit()
    return transitions, machine_params
|
import json, sys

# Sum the NoUnwind statistics produced by the attributor and functionattrs
# passes across all tests in an LLVM lit JSON results file (argv[1]).
nounwind_attributor = 0
nounwind_functionattrs = 0
with open(sys.argv[1]) as f:
    data = json.load(f)
for key1, value1 in data.items():
    if key1 != 'tests':
        continue
    for test in value1:
        for key, value in test.items():
            if key != 'metrics':
                continue
            for attr, strnum in value.items():
                # BUG FIX: attr.encode("ascii") == "..." compared bytes to
                # str, which is always False on Python 3, so the counters
                # never advanced; compare the strings directly (works on
                # Python 2 as well).
                if attr == "attributor.NumFnNoUnwind":
                    nounwind_attributor += float(strnum)
                elif attr == "functionattrs.NumNoUnwind":
                    nounwind_functionattrs += float(strnum)
print("attributor.NumFnNoUnwind: " + str(nounwind_attributor))
print("functionattrs.NumNoUnwind: " + str(nounwind_functionattrs))
|
import collections
import contextlib
import cProfile
import enum
import errno
import io
import json
import os
import pstats
import subprocess
import time

import ws.cworker
import ws.logs
import ws.sockets
import ws.signals
import ws.worker
from ws.config import config
from ws.err import *
from ws.logs import error_log, profile_log
# Profiling switches, read once at import time from the [profiling] section.
SERVER_PROFILING_ON = config.getboolean('profiling', 'on_server')
WORKER_PROFILING_ON = config.getboolean('profiling', 'on_workers')
# Presumably kept only for backwards compatibility (note the name) --
# new code should reference ws.signals.raising_signal_handler directly.
default_signal_handler_depreciated = ws.signals.raising_signal_handler
# SIGTERM raises (see main(), which catches SignalReceivedException) so the
# listen() loop can unwind through normal exception handling.
ws.signals.signal(ws.signals.SIGTERM, ws.signals.raising_signal_handler)
class Server:
    """Pre-forking master process.

    Binds the listening socket, forks a fixed pool of worker processes, and
    hands each accepted connection to a worker over FD-passing IPC. Child
    reaping happens in a SIGCHLD handler; replacement workers are forked
    from the listen() loop.
    """
    class ExecutionContext(enum.Enum):
        # distinguishes master from forked-child code paths after os.fork()
        main = 'main'
        worker = 'worker'
    # NOTE(review): requires `import collections` at module level.
    ActiveWorker = collections.namedtuple('ActiveWorker',
                                          ['pid', 'created_on', 'fd_transport'])
    def __init__(self):
        if not sys_has_fork_support():
            raise SysError(code='FORK_NOT_IMPLEMENTED',
                           msg="Kernel or C lib versions don't have "
                               "fork() support.")
        self.host = config['settings']['host']
        self.port = config.getint('settings', 'port')
        self.tcp_backlog_size = config.getint('settings', 'tcp_backlog_size')
        self.process_count_limit = config.getint('settings',
                                                 'process_count_limit')
        self.execution_context = self.ExecutionContext.main
        self.sock = ws.sockets.ServerSocket(ws.sockets.AF_INET,
                                            ws.sockets.SOCK_STREAM)
        self.sock.setsockopt(ws.sockets.SOL_SOCKET, ws.sockets.SO_REUSEADDR, 1)
        self.accepted_connections = 0
        self.workers = {}  # pid -> WorkerProcess
        self.reaping = False  # re-entrancy guard for reap_children()
        self.reaped_pids = set()
        self.received_signals = set()
        ws.signals.signal(ws.signals.SIGCHLD, self.reap_children)
        ws.signals.signal(ws.signals.SIGUSR1, self.receive_signal)
    def setup(self):
        """ Bind socket and pre-fork workers. """
        error_log.info('Binding server on %s:%s', self.host, self.port)
        self.sock.bind((self.host, self.port))
        self.fork_workers(self.process_count_limit)
    def cleanup(self):
        """ Closing listening socket and reap children.
        This method sleeps for the maximum timeout of SIGTERM signal sent to
        a worker. (Worker.sigterm_timeout)
        """
        # don't cleanup workers because their sockets were already closed
        # during the self.fork_worker() call
        if self.execution_context == self.ExecutionContext.worker:
            return
        error_log.info("Closing server's listening socket")
        try:
            self.sock.close()
        except OSError:
            error_log.exception('close() on listening socket failed.')
        # restore default SIGCHLD so reap_children doesn't race with waitpid
        ws.signals.signal(ws.signals.SIGCHLD, ws.signals.SIG_DFL)
        active_workers = [worker for worker in self.workers.values()
                          if worker.pid not in self.reaped_pids]
        for worker in active_workers:
            worker.terminate()
        if active_workers:
            timeout = max(worker.sigterm_timeout for worker in active_workers)
            error_log.info('Waiting %s seconds for children to finish.',
                           timeout)
            time.sleep(timeout)
        for worker in active_workers:
            # non-blocking check; escalate to SIGKILL for stragglers
            pid, exit = os.waitpid(worker.pid, os.WNOHANG)
            if not pid:
                worker.kill_if_hanged()
    def __enter__(self):
        # deprecated context-manager protocol; prefer setup()/cleanup()
        error_log.depreciate('%s', self.__class__.__enter__.__name__)
        self.setup()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        error_log.depreciate('%s', self.__class__.__exit__.__name__)
        self.cleanup()
        return False
    def listen(self):
        """Accept connections forever, distributing each one to a worker
        and replacing reaped workers along the way."""
        assert all(isinstance(w, WorkerProcess)
                   for w in self.workers.values())
        assert len(self.workers) == self.process_count_limit
        error_log.info('Listening with backlog %s...', self.tcp_backlog_size)
        self.sock.listen(self.tcp_backlog_size)
        while True:
            # TODO add rate limiting on rate of clients sending connections
            # instead of on the HTTP syntax (which will be deferred to workers)
            try:
                sock, address = self.sock.accept()
            except OSError as err:
                error_log.warning('accept() raised ERRNO=%s with MSG=%s',
                                  err.errno, err.strerror)
                # TODO perhaps reopen failed listening sockets.
                assert err.errno not in (errno.EBADF, errno.EFAULT,
                                         errno.EINVAL, errno.ENOTSOCK,
                                         errno.EOPNOTSUPP)
                # don't break the listening loop just because one accept failed
                continue
            self.accepted_connections += 1
            passed = self.distribute_connection(client_socket=sock,
                                                address=address)
            if not passed:
                # TODO fork and reply quickly with a 503
                error_log.warning('Could not distribute connection %s / %s to '
                                  'workers. Dropping connection.',
                                  sock, address)
                sock.close(pass_silently=True)
            # duplicate the set so SIGCHLD handler doesn't cause problems
            to_remove = frozenset(self.reaped_pids)
            for pid in to_remove:
                old_worker = self.workers.pop(pid)
                old_worker.close_ipc()
                self.reaped_pids.remove(pid)
            # call outside of loop to avoid a race condition where a
            # worker_process get's forked with a pid in self.reaped_pids
            missing = self.process_count_limit - len(self.workers)
            if missing > 0:
                self.fork_workers(missing)
            for worker_process in self.workers.values():
                worker_process.kill_if_hanged()
            # forward a pending SIGUSR1 to every worker exactly once
            if ws.signals.SIGUSR1 in self.received_signals:
                for pid in self.workers:
                    ws.signals.kill(pid, ws.signals.SIGUSR1)
                self.received_signals.remove(ws.signals.SIGUSR1)
    def distribute_connection(self, client_socket, address):
        """Offer the connection to workers in round-robin order; return
        True as soon as one accepts it, False when all fail."""
        for i, worker in enumerate(self.workers_round_robin()):
            if not worker.can_work():
                continue
            try:
                worker.send_connections([(client_socket, address)])
                return True
            except OSError as err:
                # a reaped worker failing is expected; try the next one
                if worker.pid in self.reaped_pids:
                    continue
                error_log.warning('sending file descriptors to worker %s '
                                  'raised ERRNO=%s with MSG=%s',
                                  worker, err.errno, err.strerror)
                worker.terminate()
        return False
    def workers_round_robin(self):
        """Rotate the worker tuple by the number of accepted connections so
        load is spread across workers."""
        assert len(self.workers) > 0
        workers = tuple(self.workers.values())
        round_robin_offset = self.accepted_connections % len(workers)
        first = workers[round_robin_offset:]
        last = tuple(reversed(workers[:round_robin_offset]))
        return first + last
    def fork_workers(self, count=1):
        # NOTE(review): the message logs process_count_limit, but `count`
        # workers are actually forked -- misleading when count differs.
        error_log.info('Forking %s workers', self.process_count_limit)
        assert isinstance(count, int)
        for _ in range(count):
            fd_transport = ws.sockets.FDTransport()
            pid = os.fork()
            if pid:
                # parent (master) process: register the new worker
                self.execution_context = self.ExecutionContext.main
                error_log.debug('Forked worker with pid=%s', pid)
                ws.sockets.randomize_ssl_after_fork()
                fd_transport.mode = 'sender'
                wp = WorkerProcess(pid=pid, fd_transport=fd_transport)
                self.workers[wp.pid] = wp
            else:
                # child: becomes a worker and never returns (os._exit below)
                self.execution_context = self.ExecutionContext.worker
                ws.signals.reset_handlers(excluding={ws.signals.SIGTERM})
                ws.signals.signal(ws.signals.SIGCHLD, ws.signals.SIG_IGN)
                # noinspection PyBroadException
                try:
                    ws.logs.setup_worker_handlers()
                    fd_transport.mode = 'receiver'
                    # drop inherited IPC channels of sibling workers and the
                    # listening socket; detach stdio
                    for other_worker in self.workers.values():
                        other_worker.close_ipc()
                    self.sock.close()
                    os.close(0)
                    os.close(1)
                    os.close(2)
                    with profile(WORKER_PROFILING_ON):
                        worker = ws.worker.Worker(fd_transport=fd_transport)
                        exit_code = worker.work()
                except BaseException:
                    error_log.exception('Worker failed.')
                    exit_code = 1
                # noinspection PyProtectedMember
                os._exit(exit_code)
    # noinspection PyUnusedLocal
    def receive_signal(self, signum, stack_frame):
        # signal handler: defer real processing to the listen() loop
        self.received_signals.add(signum)
    # noinspection PyUnusedLocal
    def reap_children(self, signum, stack_frame):
        """SIGCHLD handler: waitpid() all exited children and remember their
        pids so listen() can clean up the bookkeeping."""
        # TODO use a lock
        error_log.debug3('reap_children() called.')
        if self.reaping:
            return
        self.reaping = True
        try:
            while True:
                pid, exit_indicator = os.waitpid(-1, os.WNOHANG)
                if pid:
                    error_log.debug('Reaped pid %s', pid)
                    assert pid not in self.reaped_pids
                    self.reaped_pids.add(pid)
                else:
                    break
        except OSError as err:
            error_log.debug('During reaping of zombie child: wait() sys call '
                            'failed with ERRNO=%s and MSG=%s. This is mostly '
                            'not a problem.', err.errno, err.strerror)
        finally:
            self.reaping = False
class WorkerProcess:
    """Master-side bookkeeping for one forked worker process.

    Tracks creation time, how many client sockets were handed over, and the
    SIGTERM -> SIGKILL termination state machine.
    """
    sigterm_timeout = config.getint('settings', 'process_sigterm_timeout')

    def __init__(self, pid, fd_transport):
        self.pid = pid
        self.fd_transport = fd_transport
        self.created_on = time.time()
        self.sent_sockets = 0
        self.sent_sigterm_on = None
        self.terminating = False

    def send_connections(self, connections):
        """Hand a batch of (socket, address) pairs to the worker over the
        FD-passing transport; addresses travel as a JSON payload."""
        sockets, addresses = zip(*connections)
        msg = bytes(json.dumps(addresses), encoding='utf-8')
        fds = [cs.fileno() for cs in sockets]
        self.fd_transport.send_fds(msg=msg, fds=fds)
        self.sent_sockets += len(sockets)
        return True

    def can_work(self):
        # a worker that was asked to terminate gets no new connections
        return not self.terminating

    def terminate(self):
        """Send SIGTERM once; repeated calls are no-ops."""
        if self.terminating:
            return
        try:
            self.sent_sigterm_on = time.time()
            os.kill(self.pid, ws.signals.SIGTERM)
            self.terminating = True
        except OSError as err:
            # BUG FIX: the format string has three placeholders but only two
            # arguments were supplied, making the logging call itself fail;
            # the pid was missing.
            error_log.warning('Failed to sent SIGTERM to worker with pid %s. '
                              'ERRNO=%s and MSG=%s',
                              self.pid, err.errno, err.strerror)

    def kill_if_hanged(self, now=None):
        """SIGKILL the worker when SIGTERM was sent longer ago than
        sigterm_timeout. Returns True when the worker is terminating."""
        if not self.terminating:
            return False
        now = now or time.time()
        if now - self.sent_sigterm_on > self.sigterm_timeout:
            # don't fail if worker is already dead.
            try:
                ws.signals.kill(self.pid, ws.signals.SIGKILL)
            except OSError as err:
                # BUG FIX: same missing-pid logging bug as in terminate().
                error_log.warning('Killing worker with pid %s failed. '
                                  'ERRNO=%s and MSG=%s',
                                  self.pid, err.errno, err.strerror)
        return True

    def close_ipc(self):
        # release the FD-passing channel to this worker
        self.fd_transport.discard()

    def __repr__(self):
        return 'WorkerProcess(pid={})'.format(self.pid)
def sys_has_fork_support():
    """Probe for a working os.fork() by forking a throw-away child.

    Returns False only when fork() reports ENOSYS; any other OSError is
    treated as "fork exists" and returns True.
    """
    error_log.info('Checking if system has fork support by doing a '
                   'dummy fork...')
    try:
        child_pid = os.fork()
    except OSError as err:
        if err.errno == errno.ENOSYS:
            error_log.critical('System does not have fork() support.')
            return False
        return True
    if child_pid == 0:
        # we are the dummy child: vanish immediately
        # noinspection PyProtectedMember
        os._exit(0)
    error_log.info('Fork successful. Cleaning up dummy child '
                   '(pid={:d})...'.format(child_pid))
    os.waitpid(child_pid, 0)
    return True
@contextlib.contextmanager
def profile(enabled=True):
    """Context manager that cProfile-profiles the wrapped block.

    When *enabled* is falsy the block runs unprofiled; otherwise cumulative
    stats are emitted to the profile log on exit, even if the block raises.
    """
    if not enabled:
        yield
        return
    error_log.info('Starting profiling.')
    prof = cProfile.Profile()
    prof.enable()
    profile_log.profile('Enabled profiling')
    try:
        yield
    finally:
        prof.disable()
        profile_log.profile('Disabled profiling')
        out = io.StringIO()
        stats = pstats.Stats(prof, stream=out).sort_stats('cumulative')
        stats.print_stats()
        profile_log.profile('cProfiler results:\n %s', out.getvalue())
def main():
    """Entry point: set up the server and run its accept loop forever.

    The listen() loop is only ever left via an exception; cleanup always
    runs. Profiling wraps the loop when SERVER_PROFILING_ON is set.
    """
    # Main process should never exit from the server.listen() loop unless
    # an exception occurs.
    # BUG FIX: with shell=True the command must be a single string.
    # Passing a list made '-n' an argument *of the shell*, not of ulimit,
    # so the flag was silently dropped. (shell=True is required here
    # because 'ulimit' is a shell builtin, not an executable.)
    fd_limit = subprocess.check_output('ulimit -n', shell=True)
    error_log.info('ulimit -n is "%s"', fd_limit.decode('ascii'))
    server = Server()
    server.setup()
    with profile(SERVER_PROFILING_ON):
        # noinspection PyBroadException
        try:
            server.listen()
        except SignalReceivedException as err:
            if err.signum == ws.signals.SIGTERM:
                error_log.info('SIGTERM signal broke listen() loop.')
            else:
                error_log.exception('Unknown signal broke listen() loop.')
        except KeyboardInterrupt:
            error_log.info('KeyboardInterrupt broke listen() loop.')
        except BaseException:
            error_log.exception('Unhandled exception broke listen() loop.')
        finally:
            server.cleanup()
|
import numpy as np
from numpy.testing import assert_array_equal
from cloudbusting.tools import *
def test_rle_to_mask():
    """Exercise rle_to_mask on empty, single-run and multi-run encodings.

    RLE pairs are (1-based start pixel, run length) in column-major order.
    """
    shape = (5, 6)
    # Null case: no runs yields an all-False mask.
    blank = np.zeros([5, 6], dtype=bool)
    assert_array_equal(rle_to_mask([], shape), blank)
    # One run of 3 starting at pixel 1: top of the first column.
    want = np.zeros([5, 6], dtype=bool)
    want[:3, 0] = True
    assert_array_equal(rle_to_mask([1, 3], shape), want)
    # Two runs; the second (start 11, length 5) fills column 2 completely.
    want = np.zeros([5, 6], dtype=bool)
    want[:3, 0] = True
    want[:, 2] = True
    assert_array_equal(rle_to_mask([1, 3, 11, 5], shape), want)
|
#-- THIS LINE SHOULD BE THE FIRST LINE OF YOUR SUBMISSION! --#
def all_truthy(values):
    """Return True when every element of *values* is truthy.

    The empty iterable is vacuously True, mirroring the builtin all().
    """
    for value in values:
        if not value:
            return False
    return True
#-- THIS LINE SHOULD BE THE LAST LINE OF YOUR SUBMISSION! ---#
### DO NOT SUBMIT THE FOLLOWING LINES!!! THESE ARE FOR LOCAL TESTING ONLY!
# Smoke tests: truthy-only lists pass, the empty list is vacuously True,
# and a single falsy element (0.0) fails the check.
assert(all_truthy([True, "yes", "no", 1, [{}]]))
assert(all_truthy([]))
assert(not all_truthy([True, "yes", 0.0]))
import logging
# Root-logger setup: INFO and above is appended to runtime.log (UTF-8).
log_name = 'runtime.log'
LOGGER = logging.getLogger(__name__)
# NOTE(review): the handler is created first and then handed to
# basicConfig, which applies the format/datefmt below to it.
fh = logging.FileHandler(encoding='utf-8', mode='a', filename=log_name)
logging.basicConfig(handlers=[fh], format='[%(asctime)s %(levelname)s]<%(process)d> %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)
from plugins import BasePlugin
from plugins import PluginsData
class ScreenDumpPlugin(BasePlugin):
    """Plugin that dumps a PluginsData table to stdout (Python 2 syntax)."""
    def run(self, rule, data=None, rows_display_limit = 100):
        # Prints the field names, then up to rows_display_limit value rows,
        # followed by a truncation notice when rows were cut off.
        # Does nothing when data is absent or not a PluginsData instance.
        if data and isinstance(data, PluginsData):
            print 'Fields:'
            for field in data.fields:
                print '  {0}'.format(field)
            print 'Values:'
            for row in data.values[:rows_display_limit]:
                print ' ', ', '.join([str(field) for field in row])
            if len(data.values) > rows_display_limit:
                print '  ... Showing {0} of {1} items'.format(rows_display_limit, len(data.values))
        else:
            # Deliberate no-op for missing/invalid input.
            pass
def init(rule):
    # Factory entry point -- presumably called by the plugin loader to
    # instantiate this plugin; TODO confirm against the loader code.
    return ScreenDumpPlugin(rule)
|
import os
import numpy
class BaseContainer(object):
    """Loads ../test_data.csv (relative to this module) at import time.

    NOTE(review): the CSV is parsed when this module is imported; a
    missing or malformed file makes the import itself fail.
    """
    filename = os.path.join(os.path.dirname(__file__), '../test_data.csv')
    # Skip the header row; genfromtxt yields a 2-D float array.
    data = numpy.genfromtxt(filename, delimiter=',', skip_header=1)
    # Feature matrix: columns 0-4.
    X = numpy.array(data[:, 0:5])
    # Target vector: column 5.
    y = numpy.array(data[:, 5])
|
import abc
import textwrap
class GitHook(metaclass=abc.ABCMeta):
    """
    Base class to define a Git hook usable by `hooks` task.

    Concrete hooks implement `name` (the hook's registry identifier) and
    `script` (the shell body installed into the repository).
    """
    @abc.abstractmethod
    def name(self):
        """
        :rtype: unicode
        :return: Name of hook.
        """
    @abc.abstractmethod
    def script(self):
        """
        :rtype: unicode
        :return: Script code. Omit the shebang, as it is added later by a post-process step when
        hooks are installed in project.
        """
class FixFormatGitHook(GitHook):
    """
    A hook that prevents developer from committing unless it respects formats expected by
    our `fix-format` tool.
    """
    def name(self):
        # Registry key used by _add_hook()/get_default_hook().
        return 'fix-format'
    def script(self):
        # Shell body installed as the hook; runs fix-format --check over
        # the staged (added/copied/modified) files and aborts the commit
        # on failure. Dedented below so it can be indented here.
        script = """\
        if ! which fix-format >/dev/null 2>&1
        then
            echo "fix-format not found, install in an active environment with:"
            echo "    conda install esss_fix_format"
            exit 1
        else
            git diff-index --diff-filter=ACM --name-only --cached HEAD | fix-format --check --stdin
            returncode=$?
            if [ "$returncode" != "0" ]
            then
                echo ""
                echo "fix-format check failed (status=$returncode)! To fix, execute:"
                echo "    ff -c"
                exit 1
            fi
        fi
        """
        return textwrap.dedent(script)
def _add_hook(hook):
    """Register *hook* in the module registry, rejecting duplicate names."""
    name = hook.name()
    if name in _HOOKS:
        raise KeyError(f"A hook named '{name}' already exists")
    _HOOKS[name] = hook
# All hooks available by default
# Registry mapping hook name -> GitHook instance; filled right below and
# queried by get_default_hook().
_HOOKS = {}
_add_hook(FixFormatGitHook())
def get_default_hook(name):
    """
    :param unicode name: Name of a hook.
    :rtype: GitHook
    :return: A Git hook object.
    :raises KeyError: if no hook with that name was registered.
    """
    return _HOOKS[name]
|
from common.permissions.action_based_permissions import Action
from common.permissions.http_based_permissions import HTTP
from common.permissions.permissions import *
|
from keras.layers import concatenate, add, multiply
from numpy import average
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
from tensorflow.keras import initializers as initializations
from tensorflow.keras import regularizers, constraints
from tensorflow.keras.layers import Add, Average, Concatenate, Maximum, Multiply
from tensorflow_core import maximum
class Attention_layer(Layer):
    """
    Attention operation, with a context/query vector, for temporal data.
    Supports masking.

    Follows the work of Yang et al.
    [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]
    "Hierarchical Attention Networks for Document Classification",
    using a context vector to assist the attention.

    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
        2D tensor with shape: `(samples, features)`.

    Put it on top of an RNN layer (GRU/LSTM/SimpleRNN) with
    return_sequences=True; dimensions are inferred from the RNN output.

    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(Attention_layer())
    """
    def __init__(self,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        self.supports_masking = True
        self.init = initializations.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        super(Attention_layer, self).__init__(**kwargs)
    def build(self, input_shape):
        assert len(input_shape) == 3
        # BUG FIX: tf.keras' Layer.add_weight takes `name` as its first
        # positional argument, so the shape tuple must be passed via the
        # `shape` keyword (previously it was bound to `name`). Also use
        # the canonical 'zeros' initializer identifier.
        self.W = self.add_weight(shape=(input_shape[-1], input_shape[-1]),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        if self.bias:
            self.b = self.add_weight(shape=(input_shape[-1],),
                                     initializer='zeros',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        super(Attention_layer, self).build(input_shape)
    def compute_mask(self, input, input_mask=None):
        # Do not pass the mask to the next layers.
        return None
    def call(self, x, mask=None):
        # uit: (samples, steps, features)
        uit = K.dot(x, self.W)
        if self.bias:
            uit += self.b
        uit = K.tanh(uit)
        a = K.exp(uit)
        # Apply the mask after the exp; weights are re-normalized below.
        # NOTE(review): mask is (samples, steps) while `a` is 3-D; this
        # multiply may need K.expand_dims(mask) to broadcast -- confirm.
        if mask is not None:
            a *= K.cast(mask, K.floatx())
        # K.epsilon() guards against a near-zero denominator early in
        # training (e.g. fully masked rows), which would produce NaNs.
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)
    def compute_output_shape(self, input_shape):
        # tf.keras name of the output-shape hook: (samples, features).
        return input_shape[0], input_shape[-1]
    def get_output_shape_for(self, input_shape):
        # Kept for Keras-1 style callers; delegates to the tf.keras hook.
        return self.compute_output_shape(input_shape)
class AttentionM(Layer):
    """
    Keras layer to compute an attention vector on an incoming matrix.
    # Input
        enc - 3D Tensor of shape (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
    # Output
        2D Tensor of shape (BATCH_SIZE, EMBED_SIZE)
    # Usage
        enc = LSTM(EMBED_SIZE, return_sequences=True)(...)
        att = AttentionM()(enc)
    """
    def __init__(self, **kwargs):
        super(AttentionM, self).__init__(**kwargs)
    def build(self, input_shape):
        # W: (EMBED_SIZE, 1) -- per-feature scoring vector
        # b: (MAX_TIMESTEPS,) -- per-timestep bias
        self.W = self.add_weight(name="W_{:s}".format(self.name),
                                 shape=(input_shape[-1], 1),
                                 initializer="normal")
        self.b = self.add_weight(name="b_{:s}".format(self.name),
                                 shape=(input_shape[1], 1),
                                 initializer="zeros")
        super(AttentionM, self).build(input_shape)
    def call(self, x, mask=None):
        # input: (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
        # et: (BATCH_SIZE, MAX_TIMESTEPS) -- tanh-squashed scores
        et = K.squeeze(K.tanh(K.dot(x, self.W) + self.b), axis=-1)
        # at: (BATCH_SIZE, MAX_TIMESTEPS) -- attention weights
        at = K.softmax(et)
        # NOTE(review): masked weights are zeroed but NOT re-normalized,
        # so they no longer sum to 1 -- confirm this is intended.
        if mask is not None:
            at *= K.cast(mask, K.floatx())
        # atx: (BATCH_SIZE, MAX_TIMESTEPS, 1)
        atx = K.expand_dims(at, axis=-1)
        # ot: (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE) -- weighted inputs
        ot = x * atx
        # output: (BATCH_SIZE, EMBED_SIZE) -- attention-pooled encoding
        return K.sum(ot, axis=1)
    def compute_mask(self, input, input_mask=None):
        # do not pass the mask to the next layers
        return None
    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1])
    def get_config(self):
        return super(AttentionM, self).get_config()
class AttentionMC(Layer):
    """
    Keras layer to compute an attention vector on an incoming matrix
    using a learned context vector.
    # Input
        enc - 3D Tensor of shape (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
    # Output
        2D Tensor of shape (BATCH_SIZE, EMBED_SIZE)
    # Usage
        enc = Bidirectional(GRU(EMBED_SIZE,return_sequences=True))(...)
        att = AttentionMC()(enc)
    """
    def __init__(self, **kwargs):
        super(AttentionMC, self).__init__(**kwargs)
    def build(self, input_shape):
        # W: (EMBED_SIZE, 1) -- per-feature scoring vector
        # b: (MAX_TIMESTEPS, 1) -- per-timestep bias
        # u: (MAX_TIMESTEPS, MAX_TIMESTEPS) -- learned context matrix
        self.W = self.add_weight(name="W_{:s}".format(self.name),
                                 shape=(input_shape[-1], 1),
                                 initializer="normal")
        self.b = self.add_weight(name="b_{:s}".format(self.name),
                                 shape=(input_shape[1], 1),
                                 initializer="zeros")
        self.u = self.add_weight(name="u_{:s}".format(self.name),
                                 shape=(input_shape[1], input_shape[1]),
                                 initializer="normal")
        super(AttentionMC, self).build(input_shape)
    def call(self, x, mask=None):
        # input: (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
        # et: (BATCH_SIZE, MAX_TIMESTEPS) -- raw scores
        et = K.squeeze(K.tanh(K.dot(x, self.W) + self.b), axis=-1)
        # at: (BATCH_SIZE, MAX_TIMESTEPS) -- scores mixed through the
        # learned context before softmax
        at = K.softmax(K.dot(et, self.u))
        # NOTE(review): masked weights are zeroed but not re-normalized.
        if mask is not None:
            at *= K.cast(mask, K.floatx())
        # ot: (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
        atx = K.expand_dims(at, axis=-1)
        ot = atx * x
        # output: (BATCH_SIZE, EMBED_SIZE)
        return K.sum(ot, axis=1)
    def compute_mask(self, input, input_mask=None):
        # do not pass the mask to the next layers
        return None
    def compute_output_shape(self, input_shape):
        # output shape: (BATCH_SIZE, EMBED_SIZE)
        return (input_shape[0], input_shape[-1])
    def get_config(self):
        return super(AttentionMC, self).get_config()
class AttentionMV(Layer):
    """
    Keras layer to compute an attention vector on an incoming matrix
    and a user provided context vector.
    # Input
        enc - 3D Tensor of shape (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
        ctx - 2D Tensor of shape (BATCH_SIZE, EMBED_SIZE) (optional)
    # Output
        2D Tensor of shape (BATCH_SIZE, EMBED_SIZE)
    # Usage
        enc = Bidirectional(GRU(EMBED_SIZE,return_sequences=True))(...)
        # with user supplied vector
        ctx = GlobalAveragePooling1D()(enc)
        att = AttentionMV()([enc, ctx])
    """
    def __init__(self, **kwargs):
        super(AttentionMV, self).__init__(**kwargs)
    def build(self, input_shape):
        assert type(input_shape) is list and len(input_shape) == 2
        # W: (EMBED_SIZE, 1) -- per-feature scoring vector
        # b: (MAX_TIMESTEPS, 1) -- per-timestep bias
        # U: (EMBED_SIZE, MAX_TIMESTEPS) -- projects the context vector
        self.W = self.add_weight(name="W_{:s}".format(self.name),
                                 shape=(input_shape[0][-1], 1),
                                 initializer="normal")
        self.b = self.add_weight(name="b_{:s}".format(self.name),
                                 shape=(input_shape[0][1], 1),
                                 initializer="zeros")
        self.U = self.add_weight(name="U_{:s}".format(self.name),
                                 shape=(input_shape[0][-1],
                                        input_shape[0][1]),
                                 initializer="normal")
        super(AttentionMV, self).build(input_shape)
    def call(self, xs, mask=None):
        # input: [x, c]
        # x: (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
        # c: (BATCH_SIZE, EMBED_SIZE)
        x, c = xs
        # et: (BATCH_SIZE, MAX_TIMESTEPS)
        et = K.dot(c, self.U) + K.squeeze((K.dot(x, self.W) + self.b), axis=-1)
        # at: (BATCH_SIZE, MAX_TIMESTEPS)
        at = K.softmax(et)
        if mask is not None and mask[0] is not None:
            # BUG FIX: for a multi-input layer `mask` is a *list* of
            # per-input masks; cast the sequence input's mask (mask[0]),
            # not the list itself, which the original code did.
            at *= K.cast(mask[0], K.floatx())
        # ot: (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
        atx = K.expand_dims(at, axis=-1)
        ot = atx * x
        # output: (BATCH_SIZE, EMBED_SIZE)
        return K.sum(ot, axis=1)
    def compute_mask(self, input, input_mask=None):
        # do not pass the mask to the next layers
        return None
    def compute_output_shape(self, input_shape):
        # output shape: (BATCH_SIZE, EMBED_SIZE)
        return (input_shape[0][0], input_shape[0][-1])
    def get_config(self):
        return super(AttentionMV, self).get_config()
class AttentionMM(Layer):
    """
    Keras layer to compute an attention vector on a pair of incoming
    matrices (decomposable-attention style alignment).
    # Input
        m1 - 3D Tensor of shape (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
        m2 - 3D Tensor of shape (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
        merge_mode - one of concat, diff, prod, avg or max.
    # Output
        if merge_mode == "concat":
            2D Tensor of shape (BATCH_SIZE, EMBED_SIZE*2)
        else:
            2D Tensor of shape (BATCH_SIZE, EMBED_SIZE)
    # Usage
        enc1 = LSTM(EMBED_SIZE, return_sequences=True)(...)
        enc2 = LSTM(EMBED_SIZE, return_sequences=True)(...)
        att = AttentionMM("concat")([enc1, enc2])
        att = BatchNormalization()(att)
    """
    def __init__(self, merge_mode, **kwargs):
        self.merge_mode = merge_mode
        assert self.merge_mode in set(["concat", "diff", "prod", "avg", "max"])
        super(AttentionMM, self).__init__(**kwargs)
    def build(self, input_shape):
        assert type(input_shape) is list and len(input_shape) == 2
        assert input_shape[0] == input_shape[1]
        # W1, W2: (EMBED_SIZE, EMBED_SIZE) -- pre-alignment projections
        # b1, b2: (MAX_TIMESTEPS, EMBED_SIZE) -- their biases
        # U1, U2: (EMBED_SIZE, EMBED_SIZE) -- own-input output projections
        # V1, V2: (EMBED_SIZE, EMBED_SIZE) -- aligned-input projections
        # b3, b4: (MAX_TIMESTEPS, EMBED_SIZE) -- output biases
        self.embed_size = input_shape[0][-1]
        self.max_timesteps = input_shape[0][1]
        self.W1 = self.add_weight(name="W1_{:s}".format(self.name),
                                  shape=(self.embed_size, self.embed_size),
                                  initializer="normal")
        self.b1 = self.add_weight(name="b1_{:s}".format(self.name),
                                  shape=(self.max_timesteps, self.embed_size),
                                  initializer="zeros")
        self.W2 = self.add_weight(name="W2_{:s}".format(self.name),
                                  shape=(self.embed_size, self.embed_size),
                                  initializer="normal")
        self.b2 = self.add_weight(name="b2_{:s}".format(self.name),
                                  shape=(self.max_timesteps, self.embed_size),
                                  initializer="zeros")
        self.U1 = self.add_weight(name="U1_{:s}".format(self.name),
                                  shape=(self.embed_size, self.embed_size),
                                  initializer="normal")
        self.U2 = self.add_weight(name="U2_{:s}".format(self.name),
                                  shape=(self.embed_size, self.embed_size),
                                  initializer="normal")
        self.V1 = self.add_weight(name="V1_{:s}".format(self.name),
                                  shape=(self.embed_size, self.embed_size),
                                  initializer="normal")
        self.V2 = self.add_weight(name="V2_{:s}".format(self.name),
                                  shape=(self.embed_size, self.embed_size),
                                  initializer="normal")
        self.b3 = self.add_weight(name="b3_{:s}".format(self.name),
                                  shape=(self.max_timesteps, self.embed_size),
                                  initializer="zeros")
        self.b4 = self.add_weight(name="b4_{:s}".format(self.name),
                                  shape=(self.max_timesteps, self.embed_size),
                                  initializer="zeros")
        super(AttentionMM, self).build(input_shape)
    def call(self, xs, mask=None):
        assert len(xs) == 2
        # separate out input matrices
        # x1.shape == (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
        # x2.shape == (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
        x1, x2 = xs
        # build alignment matrix
        # e1t, e2t: (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
        # et: (BATCH_SIZE, MAX_TIMESTEPS, MAX_TIMESTEPS)
        e1t = K.relu(K.dot(x1, self.W1) + self.b1)
        e2t = K.relu(K.dot(x2, self.W2) + self.b2)
        et = K.softmax(K.batch_dot(e2t, e1t, axes=(2, 2)))
        # align inputs
        # a1t, a2t: (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
        a1t = K.batch_dot(et, x2, axes=(1, 1))
        a2t = K.batch_dot(et, x1, axes=(2, 1))
        # produce aligned outputs
        # o1t, o2t: (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
        # (the original comment claimed MAX_TIMESTEPS*2, which was wrong)
        o1t = K.relu(K.dot(x1, self.U1) + K.dot(a1t, self.V1) + self.b3)
        o2t = K.relu(K.dot(x2, self.U2) + K.dot(a2t, self.V2) + self.b4)
        # BUG FIX: the second branch multiplied o1t again (copy-paste) and
        # both branches cast the mask *list* instead of each input's own
        # mask tensor; masks are expanded to (B, T, 1) so they broadcast
        # over the embedding axis.
        if mask is not None and mask[0] is not None:
            o1t *= K.cast(K.expand_dims(mask[0], axis=-1), K.floatx())
        if mask is not None and mask[1] is not None:
            o2t *= K.cast(K.expand_dims(mask[1], axis=-1), K.floatx())
        # o1, o2: (BATCH_SIZE, EMBED_SIZE)
        o1 = K.mean(o1t, axis=1)
        o2 = K.mean(o2t, axis=1)
        # merge the attention vectors according to merge_mode
        if self.merge_mode == "concat":
            return concatenate([o1, o2], axis=1)
        elif self.merge_mode == "diff":
            return add([o1, -o2])
        elif self.merge_mode == "prod":
            return multiply([o1, o2])
        elif self.merge_mode == "avg":
            return average([o1, o2])
        else:  # max
            return maximum([o1, o2])
    def compute_mask(self, input, input_mask=None):
        # do not pass the mask to the next layers
        return None
    def compute_output_shape(self, input_shape):
        if self.merge_mode == "concat":
            # output shape: (BATCH_SIZE, EMBED_SIZE*2)
            return (input_shape[0][0], input_shape[0][2] * 2)
        else:
            # output shape: (BATCH_SIZE, EMBED_SIZE)
            return (input_shape[0][0], input_shape[0][2])
    def get_config(self):
        config = {"merge_mode": self.merge_mode}
        base_config = super(AttentionMM, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
"""This module implements the Luhn algorithm.
It is used to check validity of various identification numbers.
For example: credit card number, Canadian Social Insurance Numbers,
survey codes on MacDonald and Taco Bell, etc.
Note: It is not intended to be a cryptographically secure hash
function; it was designed to protect against accidental errors,
not malicious attacks.
"""
class Luhn:  # suppress too-few-public-methods warning; pylint: disable=R0903
    """Validate identification numbers with the Luhn checksum.

    Attributes:
        card_num: the identification number as given (may contain spaces).
    """
    def __init__(self, card_num: str) -> None:
        """Store the number and precompute its validity once."""
        self.card_num = card_num
        # Validation works right-to-left on the digits, spaces removed.
        reversed_digits = card_num.replace(" ", "")[::-1]
        self._is_valid = self._validate(stripped_num=reversed_digits)
    @staticmethod
    def _validate(stripped_num: str) -> bool:
        """Run the Luhn checksum over a reversed, space-free digit string.

        Strings of length <= 1 or containing non-digits are invalid.
        """
        if len(stripped_num) <= 1 or not stripped_num.isdecimal():
            return False
        total = 0
        for position, char in enumerate(stripped_num, start=1):
            digit = int(char)
            if position % 2:
                # Odd positions (counted from the right) are added as-is.
                total += digit
            else:
                # Every second digit is doubled; 9 is subtracted when the
                # doubling exceeds one digit (equivalent to digit-summing).
                doubled = digit * 2
                total += doubled - 9 if doubled > 9 else doubled
        return total % 10 == 0
    def valid(self) -> bool:
        """Return whether the identification number passes the Luhn check."""
        return self._is_valid
|
__author__ = "Manuel Escriche <mev@tid.es>"
import os
import base64
import requests
import xlsxwriter
import re
from datetime import date, datetime
from xlsxwriter.utility import xl_range
from operator import attrgetter
from collections import namedtuple
from kernel.Settings import settings
from kernel.SheetFormats import SpreadsheetFormats
from kernel.TrackerBook import trackersBookByKey
from kernel.IssuesList import IssuesList
class Connector:
    """Singleton holding authenticated requests sessions against the JIRA
    REST API (main and test instances)."""
    url_api = {
        'session': '/rest/auth/1/session',
        'project': '/rest/api/latest/project',
        'component': '/rest/api/latest/component/',
        'search': '/rest/api/latest/search'
    }
    _singlenton = None
    def __new__(cls, *args, **kwargs):
        # Classic singleton: reuse the first created instance.
        # BUG FIX: do not forward *args/**kwargs to object.__new__ --
        # in Python 3 that raises TypeError when any arguments are given.
        if not cls._singlenton:
            cls._singlenton = super(Connector, cls).__new__(cls)
        return cls._singlenton
    def __init__(self):
        self.jiraSession = self._connect(settings.server['JIRA'])
        self.jiraTestSession = self._connect(settings.server['JIRATEST'])
    def _connect(self, server):
        """Open a requests session against *server* using HTTP Basic auth."""
        auth = '{}:{}'.format(server.username, server.password)
        # base64 output is pure ASCII, so decoding is always safe.
        access_key = base64.b64encode(bytes(auth, 'utf-8')).decode('ascii')
        headers = {'Content-Type': 'application/json', "Authorization": "Basic {}".format(access_key)}
        root_url = 'http://{}'.format(server.domain)
        session = requests.session()
        url = '{}{}'.format(root_url, Connector.url_api['session'])
        # BUG FIX: was `except Exception: raise Exception`, which replaced
        # the real error (type, message, traceback) with a bare Exception;
        # let the original exception propagate instead.
        session.get(url, headers=headers, verify=False)
        session.headers.update({'Content-Type': 'application/json'})
        return session
    def _search(self, server, params):
        """Run a JQL search; returns the decoded JSON payload."""
        root_url = 'http://{}'.format(server.domain)
        url = '{}{}'.format(root_url, Connector.url_api['search'])
        answer = self.jiraSession.get(url, params=params, verify=False)
        return answer.json()
    def search(self, server, params):
        # NOTE(review): *server* selects the settings entry for the URL,
        # but the request always goes through jiraSession (never
        # jiraTestSession) -- confirm this asymmetry is intentional.
        return self._search(settings.server[server], params)
class IssuesFactory:
    """Singleton that fetches helpdesk issue lists from JIRA, falling back
    to locally cached files when the remote fetch fails."""
    # Field list requested from the JIRA search API.
    fields = 'summary,status,project,components,priority,issuetype,description,reporter,' \
             'resolution,assignee,created,updated,duedate,resolutiondate,fixVersions,releaseDate,issuelinks'
    _singlenton = None
    def __new__(cls, *args, **kwargs):
        # BUG FIX: do not forward *args/**kwargs to object.__new__ --
        # Python 3 raises TypeError when any arguments are given.
        if not cls._singlenton:
            cls._singlenton = super(IssuesFactory, cls).__new__(cls)
        return cls._singlenton
    def __init__(self):
        self.connector = Connector()
    def _recover(self, tracker, test_name, main_name, request):
        """Fetch all issues for *tracker* from both JIRA instances.

        Returns (main_list, test_list). Any failure -- including a
        *request* other than 'recovery', matching the original behaviour
        -- falls back to the cached files *test_name* / *main_name*.
        (get_helpdesk and get_coachhelpdesk were near-identical copies;
        this helper factors out the shared logic.)
        """
        try:
            if request != 'recovery':
                # The original code hit a NameError here (payloads were
                # only defined for 'recovery'), which the except below
                # turned into a cache read; make that path explicit.
                raise LookupError(request)
            payloadTest = {'fields': IssuesFactory.fields,
                           'maxResults': 1000, 'startAt': 0,
                           'jql': "created >= 2015-03-04 AND created <= 2015-05-12 AND project = {}".format(tracker)}
            payloadMain = {'fields': IssuesFactory.fields,
                           'maxResults': 1000, 'startAt': 0,
                           'jql': "project = {}".format(tracker)}
            data = self.connector.search('JIRATEST', payloadTest)
            testRecoveryList = IssuesList.fromData(test_name, data['issues'])
            data = self.connector.search('JIRA', payloadMain)
            totalIssues, receivedIssues = data['total'], len(data['issues'])
            # Page through the main instance until all issues are in.
            while totalIssues > receivedIssues:
                payloadMain['startAt'] = receivedIssues
                data['issues'].extend(self.connector.search('JIRA', payloadMain)['issues'])
                receivedIssues = len(data['issues'])
            mainRecoveryList = IssuesList.fromData(main_name, data['issues'])
        except Exception:
            testRecoveryList = IssuesList.fromFile(test_name)
            mainRecoveryList = IssuesList.fromFile(main_name)
        return mainRecoveryList, testRecoveryList
    def get_helpdesk(self, request):
        """Main helpdesk tracker (book key 'HELP')."""
        tracker = trackersBookByKey['HELP'].keystone
        return self._recover(tracker, 'helpdesktest.recovery', 'helpdesk.recovery', request)
    def get_coachhelpdesk(self, request):
        """Coach helpdesk tracker ('HELC')."""
        return self._recover('HELC', 'coachhelpdesktest.recovery', 'coachhelpdesk.recovery', request)
class Report:
channels = {'Lab': 'Fiware-lab-help',
'Tech': 'Fiware-tech-help',
'General': 'Fiware-general-help',
'Feedback': 'Fiware-feedback',
'Collaboration': 'Fiware-collaboration-request',
'Speakers': 'Fiware-speakers-request',
'Ops': 'Fiware-ops-help',
'Coach': 'Fiware[\w-]+coaching',
'Other': '.',
'CEED Tech': 'Fiware-ceedtech-coaching',
'CreatiFI': 'Fiware-creatifi-coaching',
'EuropeanPioneers': 'Fiware-europeanpioneers-coaching',
'FABulous': 'Fiware-fabulous-coaching',
'FI-ADOPT': 'Fiware-fiadopt-coaching',
'FI-C3': 'Fiware-fic3-coaching',
'FICHe': 'fiware-fiche-coaching',
'Finish': 'Fiware-finish-coaching',
'FINODEX': 'Fiware-finodex-coaching',
'FRACTALS': 'Fiware-fractals-coaching',
'FrontierCities': 'Fiware-frontiercities-coaching',
'IMPACT': 'Fiware-impact-coaching',
'INCENSe': 'Fiware-incense-coaching',
'SmartAgriFood2': 'Fiware-smartagrifood-coaching',
'SOUL-FI': 'Fiware-soulfi-coaching',
'SpeedUp Europe': 'Fiware-speedup-coaching'}
    class Issue:
        """Normalized helpdesk item used for MAIN/TEST cross-matching.

        Derives a channel, a type (NewIssue/Comment), a cleaned topic and
        a best-effort sender from the raw JIRA item. The son/father link
        slots are populated later by _report_channel().
        """
        def __init__(self, item, where):
            # *where* is 'MAIN' or 'TEST'; lid is unique across instances.
            self.lid = '{}-{}'.format(where, item.key)
            self.key = item.key
            self.created = item.created
            self.instance = where
            # Collapse runs of whitespace in the e-mail subject line.
            self.reference = re.sub(r'\s+',' ',item.reference)
            self.description = item.description
            self.assignee = item.assignee
            self.url = item.url
            self.reporter = item.reporter
            # print(self.reporter)
            self.status = item.status
            # Classify by the first channel whose '[list-name]' tag matches
            # the subject; 'Other' ('.') matches anything, so dict order
            # matters. Any regex error also falls back to 'Other'.
            try:
                self.channel = [channel for channel in Report.channels
                                if re.search(r'\[{}\]'.format(Report.channels[channel]), item.reference)][0]
            except Exception: self.channel = 'Other'
            channelPattern = Report.channels[self.channel]
            # A reply prefix (Re:/RV:/TR:...) before the channel tag marks
            # the mail as a comment on an existing thread.
            try:
                self.type = 'NewIssue' if not re.search(r'T?R[VeE]?:\s+.*\[{}\]'.format(channelPattern), item.reference) else 'Comment'
            except Exception: self.type = 'NewIssue'
            # print(self.reference)
            # Strip reply/forward prefixes and list tags to get the bare
            # discussion topic used for thread matching.
            topic = re.sub(r'T?R[VeE]?:|AW:|I:','', self.reference)
            topic = re.sub(r'F[Ww]d?:','', topic)
            topic = re.sub(r'\[FI-WARE-JIRA\]|\[FIWARE Lab\]','', topic)
            self.topic = re.sub(r'(\[Fiware.*?\])*', '', topic).strip()
            # print(self.topic)
            # self.topic = self.topic.strip()
            # print(self.topic, '\n' )
            self.sender = self.reporter
            if self.reporter == 'FW External User':
                # Try to recover the real sender address from the body;
                # on failure keep the generic reporter name.
                try:
                    self.sender = self._getCreatedByInNew(item.description) \
                        if self.type == 'NewIssue' \
                        else self._getCreatedByinComment(item.description)
                except Exception:
                    pass
            # print(item.reference, item.reporter)
            # print(item)
            # Thread links filled in by _report_channel(): confirmed
            # sons/father plus potential (topic-only) candidates.
            self.sons = []
            self.father = []
            self.p_sons = []
            self.p_fathers = []
        def __repr__(self):
            return '{}'.format(self.lid)
        def _getCreatedByInNew(self, data):
            """Extract the sender e-mail from the '[Created via e-mail
            received from: ...]' stamp; falls back to scanning the body."""
            # print(type(data))
            mfilter = re.compile(r'\[Created via e-mail received from:\s+(?P<sender>.*)\s+<(?P<email>.*@.*)>\]')
            if mfilter.search(data):
                sender = mfilter.search(data).group('sender')
                email = mfilter.search(data).group('email')
                output = [email]
            else:
                pass
                # NOTE(review): the `pass` above is a leftover; this is the
                # fallback path when the stamp is absent.
                output = self._getCreatedByinComment(data)
            # print(output)
            return output
        def _getCreatedByinComment(self, data):
            """Collect candidate sender addresses from free text, dropping
            known-noise domains; raises when none are found."""
            mfilter = re.compile(r'[^ :<+="\t\r\n\]\[]+@[\w.-]+\.[a-zA-Z]+')
            output = re.findall(mfilter, data)
            if len(output):
                output = [item for item in output if not 'gif' in item ]
                output = [item for item in output if not 'fi-ware' in item]
                output = [item for item in output if not 'fiware' in item]
                output = [item for item in output if not 'github' in item]
                # output = [item for item in output if not 'carrillo' in item]
                output = list(set(output))
            else:
                raise Exception
            return output
def __init__(self):
self.factory = IssuesFactory()
data = self.factory.get_helpdesk('recovery')
# data = self.factory.get_coachhelpdesk('recovery')
self.actualInstanceData = data[0]
self.testInstanceData = data[1]
self.data = [Report.Issue(item, 'MAIN') for item in data[0]]
self.data += [Report.Issue(item, 'TEST') for item in data[1]]
# for item in self.data:
# print (item)
# print(len(self.data))
def _report_channel(self, channel):
data = [item for item in self.data if item.channel == channel]
__data = {item.lid:item for item in data}
ws = self.workbook.add_worksheet(channel)
ws.set_zoom(80)
ws.set_column(0, 0, 12)
ws.set_column(1, 1, 5)
ws.set_column(2, 3, 10)
ws.set_column(4, 5, 200)
ws.set_column(6, 10, 15)
row, col = 0, 0
ws.merge_range(xl_range(row, 0, row, 5), "HELP DESK Recovery Report", self.spFormats.chapter_title)
ws.set_row(0, 40)
ws.insert_image(0, 4, settings.logoAzul, {'x_scale': 0.75, 'y_scale': 0.75, 'x_offset': 400, 'y_offset': 5})
row += 1
ws.write(row, 0, 'Report Date:', self.spFormats.bold_right)
ws.write(row, 1, date.today().strftime('%d-%m-%Y'))
row += 1
ws.write(row, 0, 'CHANNEL = ', self.spFormats.bold_right)
# ws.write(row, 1, channel, self.spFormats.bold_red)
ws.merge_range(xl_range(row, 1, row, 2), channel, self.spFormats.bold_red)
row += 1
ws.write(row, 0, 'MAIN =', self.spFormats.bold_right)
ws.write(row, 1, '# {} issues'.format(len([item for item in data if item.instance == 'MAIN'])))
row += 1
ws.write(row, 0, 'TEST =', self.spFormats.red_bold_right)
ws.write(row, 1, '# {} issues'.format(len([item for item in data if item.instance == 'TEST'])))
row += 1
# ws.write(row, 1, 'NEW = ', self.spFormats.red_bold_right)
ws.merge_range(xl_range(row, 0, row, 1), 'NEW = ', self.spFormats.red_bold_right)
ws.write(row, 2, '# {} issues'
.format(len([item for item in data if item.instance == 'TEST' and item.type == 'NewIssue'])))
row += 1
# ws.write(row, 1, 'COMMENTS =', self.spFormats.blue)
ws.merge_range(xl_range(row, 0, row, 1), 'COMMENTS =', self.spFormats.right_bold_green)
ws.write(row, 2, '# {} issues'
.format(len([item for item in data if item.instance == 'TEST' and item.type == 'Comment'])))
row += 1
issuesInTestInstance = [item for item in data if item.instance == 'TEST' and item.type == 'NewIssue']
commentsInTestInstance = [item for item in data if item.instance == 'TEST' and item.type == 'Comment']
print(channel, len(data), ' Issues =',len(issuesInTestInstance), ' Comments= ', len(commentsInTestInstance))
comments = [item for item in data if item.type == 'Commnent']
newIssues = [item for item in data if item.type == 'NewIssue']
for item in issuesInTestInstance:
item.sons = [_item for _item in comments if item.topic == _item.topic and any(email in _item.sender for email in item.sender )]
if len(item.sons):
for _item in item.sons: _item.father = [item,]
else:
item.p_sons = \
[_item for _item in comments if item.topic == _item.topic and item.created <= _item.created]
for item in [item for item in commentsInTestInstance if not len(item.father)]:
item.p_fathers = \
[_item for _item in newIssues if _item.topic == item.topic and _item.created <= item.created]
if len(item.p_fathers):
item.father = \
[_item for _item in item.p_fathers if any(email in _item.sender for email in item.sender)]
for _item in item.father:
if _item.sons:
_item.sons.append(item)
else:
_item.sons = [item,]
issuesToMigrate = []
issuesToMigrate.extend([item for item in issuesInTestInstance if not len(item.sons)])
issuesToMigrate.extend([item for item in commentsInTestInstance if not len(item.father)])
for item in [item for item in commentsInTestInstance if len(item.father)]:
# issuesToMigrate.append(item)
for _item in item.father:
if _item.instance == 'TEST':
issuesToMigrate.append(_item)
row += 3
ws.merge_range(xl_range(row, 0, row, 5),'ISSUES TO MIGRATE', self.spFormats.bigSection)
row += 1
ws.write_row(row, 0, ('Created', 'Where', 'Key', 'Status', 'Summary'), self.spFormats.column_heading)
row += 1
k = 0
for k, item in enumerate(sorted(issuesToMigrate, key=attrgetter('created')), start=1):
row += 1
self._write_out(ws, row, item)
row += 1
ws.write(row, 0, '#items = {}'.format(k), self.spFormats.red)
row += 3
ws.merge_range(xl_range(row, 0, row, 5), 'FINDINGS', self.spFormats.bigSection)
row += 1
ws.write_row(row, 0, ('Created', 'Where', 'Key', 'Status', 'Summary'), self.spFormats.column_heading)
k = 0
for k, item in enumerate(sorted([item for item in issuesInTestInstance], key=attrgetter('created')), start=1):
row += 1
self._write_out(ws, row, item)
for _item in item.sons:
if _item.instance == 'TEST': continue
row += 1
self._write_out(ws, row, _item)
row += 1
ws.write(row, 0, '#items = {}'.format(k), self.spFormats.red)
row += 2
ws.write_row(row, 0, ('Created', 'Where', 'Key', 'Status', 'Summary'), self.spFormats.column_heading)
k = 0
for k, item in enumerate(sorted([item for item in commentsInTestInstance if len(item.father)], key=attrgetter('created')), start=1):
row += 1
self._write_out(ws, row, item)
for _item in item.father:
row += 1
self._write_out(ws, row, _item)
if not len(item.father):
for _item in item.p_fathers:
row += 1
self._write_out(ws, row, _item, False)
row += 1
row += 1
ws.write(row, 0, '#items = {}'.format(k), self.spFormats.red)
row += 2
ws.write_row(row, 0, ('Created', 'Where', 'Key', 'Status', 'Summary'), self.spFormats.column_heading)
row += 1
k = 0
for k, item in enumerate(sorted([item for item in commentsInTestInstance if not len(item.father)], key=attrgetter('created')), start=1):
row += 1
self._write_out(ws, row, item)
# for _item in item.p_fathers:
# row += 1
# self._write_out(ws, row, _item, False)
row += 1
ws.write(row, 0, '#items = {}'.format(k), self.spFormats.red)
return
seed = 'Henning Sprang'
for item in data:
match = re.search(seed, item.description)
if match:
print(item.lid, item.key)
def _write_out(self, ws, row, item, p=True):
get_link = lambda a: 'http://130.206.80.89/browse/{}'.format(a)
black = self.workbook.add_format({'font_color': 'black'}) \
if p else self.workbook.add_format({'font_color': 'black', 'bg_color': 'yellow'})
ws.write(row, 0, item.created.strftime("%d-%m-%Y"), black)
if item.instance == 'TEST':
ws.write(row, 1, item.instance, self.spFormats.red)
ws.write_url(row, 2, get_link(item.key), self.spFormats.link, item.key)
if item.type == 'Comment':
ws.write(row, 3, 'Comment', self.spFormats.green)
elif item.type == 'NewIssue':
ws.write(row, 3, 'New', self.spFormats.red)
else: pass
else:
ws.write(row, 1, item.instance)
ws.write_url(row, 2, item.url, self.spFormats.link, item.key)
ws.write(row, 3, item.status)
ws.write(row, 4, '{}\nCreated on {} by {}\nFather = {}, Sons = {}'
.format(item.reference, item.created, item.sender, item.father, item.sons))
def _verify_channel(self, channel):
data = [item for item in self.data if item.channel == channel]
__data = {item.lid: item for item in data}
ws = self.workbook.add_worksheet(channel)
ws.set_zoom(80)
ws.set_column(0, 0, 3)
ws.set_column(1, 1, 10)
ws.set_column(2, 4, 10)
ws.set_column(5, 5, 200)
ws.set_column(6, 10, 15)
row, col = 0, 0
ws.merge_range(xl_range(row, 0, row, 5), "HELP DESK Recovery Report", self.spFormats.chapter_title)
ws.set_row(0, 40)
ws.insert_image(0, 4, settings.logoAzul, {'x_scale': 0.75, 'y_scale': 0.75, 'x_offset': 400, 'y_offset': 5})
row += 1
ws.write(row, 0, 'Report Date:', self.spFormats.bold_right)
ws.write(row, 1, date.today().strftime('%d-%m-%Y'))
row += 1
ws.write(row, 0, 'CHANNEL = ', self.spFormats.bold_right)
# ws.write(row, 1, channel, self.spFormats.bold_red)
ws.merge_range(xl_range(row, 1, row, 2), channel, self.spFormats.bold_red)
row += 1
ws.write(row, 0, 'MAIN =', self.spFormats.bold_right)
ws.write(row, 1, '# {} issues'.format(len([item for item in data if item.instance == 'MAIN'])))
row += 1
ws.write(row, 0, 'TEST =', self.spFormats.red_bold_right)
ws.write(row, 1, '# {} issues'.format(len([item for item in data if item.instance == 'TEST'])))
row += 1
# ws.write(row, 1, 'NEW = ', self.spFormats.red_bold_right)
ws.merge_range(xl_range(row, 0, row, 1), 'NEW = ', self.spFormats.red_bold_right)
ws.write(row, 2, '# {} issues'
.format(len([item for item in data if item.instance == 'TEST' and item.type == 'NewIssue'])))
row += 1
# ws.write(row, 1, 'COMMENTS =', self.spFormats.blue)
ws.merge_range(xl_range(row, 0, row, 1), 'COMMENTS =', self.spFormats.right_bold_green)
ws.write(row, 2, '# {} issues'
.format(len([item for item in data if item.instance == 'TEST' and item.type == 'Comment'])))
row += 1
issuesInTestInstance = [item for item in data if item.instance == 'TEST' and item.type == 'NewIssue']
commentsInTestInstance = [item for item in data if item.instance == 'TEST' and item.type == 'Comment']
print(channel, len(data), ' Issues =', len(issuesInTestInstance), ' Comments= ', len(commentsInTestInstance))
comments = [item for item in data if item.type == 'Commnent']
newIssues = [item for item in data if item.type == 'NewIssue']
_selection = [item for item in data if item.assignee == 'Pietropaolo Alfonso']
_topics = [item.topic for item in _selection]
mydata = [_item for item in _selection for _item in data if _item.topic in _topics and any([email in _item.sender for email in item.sender]) ]
mydata = list(set(mydata))
_itemsList = sorted(mydata, key=attrgetter('created'))
_itemsList.sort(key=attrgetter('topic'))
row += 3
ws.merge_range(xl_range(row, 0, row, 5),'ISSUES by TOPIC', self.spFormats.bigSection)
row += 1
ws.write_row(row, 0, ('#','Created', 'Instance', 'Key', 'Status', 'Summary'), self.spFormats.column_heading)
row += 1
k = 0
for k,item in enumerate(_itemsList, start=1):
row += 1
self._write_out2(k, ws, row, item)
row += 1
ws.write(row, 0, '#items = {}'.format(k), self.spFormats.red)
return
def _write_out2(self, k, ws, row, item):
get_link = lambda a: 'http://130.206.80.89/browse/{}'.format(a)
ws.write(row, 0, k)
ws.write(row, 1, item.created.strftime("%d-%m-%Y"))
if item.instance == 'TEST':
ws.write(row, 2, item.instance, self.spFormats.red)
ws.write_url(row, 3, get_link(item.key), self.spFormats.link, item.key)
if item.type == 'Comment':
ws.write(row, 4, 'Comment', self.spFormats.green)
elif item.type == 'NewIssue':
ws.write(row, 4, 'New', self.spFormats.red)
else:
pass
else:
ws.write(row, 2, item.instance)
ws.write_url(row, 3, item.url, self.spFormats.link, item.key)
ws.write(row, 4, item.status)
ws.write(row, 5, 'Topic: {}\nReference: {}\nSender: {}\nReporter: {}\nAssignee:{}'
.format(item.topic, item.reference, item.sender,item.reporter, item.assignee))
# ws.write(row, 4, 'Sender: {}\nReporter: {}\nAssignee:{}'.format(item.sender,item.reporter, item.assignee))
def _helpdesk_report(self):
channels = ('Lab', 'Tech', 'General', 'Feedback', 'Collaboration', 'Speakers', 'Ops')
# channels = ('All', 'Lab', 'Tech')
for channel in channels:
#self._report_channel(channel)
self._verify_channel(channel)
# self._report_channel('All')
def _coachhelpdesk_report(self):
channels = ('CEED Tech', 'CreatiFI', 'EuropeanPioneers', 'FABulous', 'FI-ADOPT', 'FI-C3', 'FICHe', 'Finish',
'FINODEX', 'FRACTALS', 'FrontierCities', 'IMPACT', 'INCENSe', 'SmartAgriFood2', 'SOUL-FI',
'SpeedUp Europe')
for channel in channels:
# self._report_channel(channel)
self._verify_channel(channel)
def __call__(self, *args, **kwargs):
print("Help Desk Recovery Report")
_date = datetime.now().strftime("%Y%m%d-%H%M")
filename = 'FIWARE.helpdesk.recovery.'+ _date + '.xlsx'
# filename = 'FIWARE.coachhelpdesk.recovery.'+ _date + '.xlsx'
myfile = os.path.join(settings.outHome, filename)
self.workbook = xlsxwriter.Workbook(myfile)
self.spFormats = SpreadsheetFormats(self.workbook)
self._helpdesk_report()
# self._coachhelpdesk_report()
print(': W:' + myfile)
self.workbook.close()
if __name__ == "__main__":
    # Entry point: build and write the help-desk recovery workbook.
    report = Report()
    report()
|
"""Tests of the learning curve regression workflow.
Scientific Machine Learning Benchmark:
A benchmark of regression models in chem- and materials informatics.
2020, Citrine Informatics.
"""
import pytest
pytest.importorskip("sklearn")
import smlb
def test_learning_curve_regression():
    """Simple examples"""
    from smlb.datasets.synthetic import Friedman1979Data
    from smlb.learners import GaussianProcessRegressionSklearn
    from smlb.learners import RandomForestRegressionSklearn
    from smlb.workflows import LearningCurveRegression

    dataset = Friedman1979Data(dimensions=5)
    holdout = smlb.GridSampler(size=2 ** 5, domain=[0, 1], rng=0)
    # dataset domain is used by default by the samplers
    samplers = tuple(
        smlb.RandomVectorSampler(size=n, rng=0) for n in [10, 12, 16]
    )
    gpr = GaussianProcessRegressionSklearn(rng=0)  # default is Gaussian kernel
    rf = RandomForestRegressionSklearn(rng=0)
    workflow = LearningCurveRegression(
        data=dataset,
        training=samplers,
        validation=holdout,
        learners=[rf, gpr],
    )  # default evaluation
    workflow.run()
|
from itertools import zip_longest
class Solution:
    def compareVersion(self, version1: str, version2: str) -> int:
        """Compare two dotted revision strings numerically per field.

        Missing fields count as 0, so "1.0" == "1.0.0" and leading zeros
        are ignored ("1.01" == "1.001"). Returns -1, 0 or 1.
        """
        a = [int(part) for part in version1.split(".")]
        b = [int(part) for part in version2.split(".")]
        # Pad the shorter revision list with zeros, then compare field-wise.
        width = max(len(a), len(b))
        a += [0] * (width - len(a))
        b += [0] * (width - len(b))
        for x, y in zip(a, b):
            if x != y:
                return -1 if x < y else 1
        return 0
# TESTS
# Each entry: (version1, version2, expected comparison result).
tests = [
    ("0.1", "1.1", -1),
    ("1.0.1", "1", 1),
    ("7.5.2.4", "7.5.3", -1),
    ("1.01", "1.001", 0),
    ("1.0", "1.0.0", 0),
]
for version1, version2, expected in tests:
    actual = Solution().compareVersion(version1, version2)
    print("Compare version", version1, version2, "->", actual)
    assert actual == expected
|
import os
import tuned.logs
from . import base
from tuned.utils.commands import commands
log = tuned.logs.get()
class cpulist_invert(base.Function):
	"""
	Inverts list of CPUs (makes its complement). For the complement it
	gets number of online CPUs from the /sys/devices/system/cpu/online,
	e.g. system with 4 CPUs (0-3), the inversion of list "0,2,3" will be
	"1"
	"""
	def __init__(self):
		# arbitrary number of arguments
		super(cpulist_invert, self).__init__("cpulist_invert", 0)

	def execute(self, args):
		# Base-class execute performs argument validation; a falsy
		# result means the arguments are unusable.
		if not super(cpulist_invert, self).execute(args):
			return None
		# NOTE(review): args are joined with ",," (double comma) before
		# inversion; this is harmless only if the cpulist parser skips
		# empty entries — confirm against
		# tuned.utils.commands.cpulist_invert.
		return ",".join(str(v) for v in self._cmd.cpulist_invert(",,".join(args)))
|
#!/usr/bin/python3
import os
import pathlib
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
plt.style.use('../stylesheet.mplstyle')
from nmrespy.core import Estimator
# ------------------------------------------------------------------------
# Set to True to carry out estimation.
# Set to False to reload already obtained results and simply plot figure
ESTIMATE = False

# --- Figure layout (figure-fraction coordinates) ---
B = 0.05  # Bottom of lowest axes
T = 0.98  # Top of highest axes
R = 0.98  # Rightmost position of axes
L = 0.02  # Leftmost position of axes
H_SEP = 0.01  # Horizontal separation of figs a) -> c) and d) -> f)
INSET_RECT = [0.05, 0.5, 0.27, 0.4]  # Location of inset axes in panels e) and f)
FIGSIZE = (3.5, 7)  # Figure size (inches)
# x-ticks of the panels (4 identical copies; only 3 panels are drawn, so the
# last copy is unused — zip() in plot() truncates)
XTICKS = 4 * [[1.74 - i * (0.03) for i in range(5)]]
YLIMS = [  # y-limits of panels a) -> c)
    (-1.5E4, 1.3E5),
    (-4E4, 1.55E5),
    (-4E4, 1.5E5),
]
COLORS = plt.rcParams['axes.prop_cycle'].by_key()['color']  # Need at least 5 colors
# Per-oscillator (x, y) displacements for the text labels in panels b) and c).
# NOTE: these are one-shot iterators, so plot() can only be run once per
# process.
DISPS = [
    # b)
    iter([
        (0.003, 3000),  # 1
        (-0.0025, 1000),
        (0.006, 1000),
        (0.001, 5000),
        (0.002, 3000),
        (-0.0025, 1000),  # 6
        (0.006, 1000),
        (0.001, 3000),
        (-0.0035, 2000),
        (-0.0015, 2000),
        (0.009, 1000),  # 11
        (0.003, 3000),
        (0.005, 3000),
        (-0.002, 2000),
        (0.01, 2000),
        (0.002, 2000),  # 16
    ]),
    # c)
    iter([
        (0.003, 3000),  # 1
        (-0.0025, 1000),
        (0.006, 1000),
        (0.001, 5000),
        (0.002, 3000),
        (-0.0025, 1000),  # 6
        (0.006, 1000),
        (0.001, 3000),
        (0.0035, 2000),
        (-0.0015, 2000),
        (0.009, 1000),  # 11
        (0.003, 3000),
        (0.005, 3000),
        (-0.002, 2000),
        (0.009, 2000),
        (0.0027, 2000),  # 16
    ]),
]
LABEL_FS = 8  # Fontsize of oscillator labels
# One-shot iterators as well: vertical offsets for the summed model (shifted
# up) and the residual (shifted down) in panels b) and c).
MODEL_SHIFTS = iter([2.5E4, 2E4])  # Upward displacement of model plots (+ve)
RESID_SHIFTS = iter([2E4, 2E4])  # Downward displacement of residual plots (+ve)
# ------------------------------------------------------------------------
def estimate():
    """Run the NMR estimation (filter, MPM, NLP) and write results.

    Pickles the estimator after the matrix-pencil and nonlinear-programming
    stages, then writes the result tables (txt/pdf/csv), errors and
    parameters under ./result.
    """
    pwd = pathlib.Path.cwd()
    # Everything below is written under "result/", so create that directory
    # (the original created "results/", which nothing ever used).
    if not (relpath := pwd / 'result').is_dir():
        os.mkdir(relpath)
    datapath = pwd / '../data/2/pdata/1'
    # NOTE(review): the original referenced `estimator` without ever creating
    # it, and `datapath` was unused; constructing the estimator from the
    # Bruker data directory is the obvious missing step — confirm against
    # nmrespy's Estimator API.
    estimator = Estimator.new_bruker(datapath)
    estimator.frequency_filter([[1.76, 1.6]], [[-4.6, -5.2]])
    estimator.matrix_pencil(M=16)
    estimator.to_pickle(path="result/mpm", force_overwrite=True)
    estimator.nonlinear_programming(phase_variance=True, max_iterations=400, fprint=False)
    estimator.to_pickle(path="result/nlp", force_overwrite=True)
    desc = "1mM artemisinin in DMSO-d6"
    for fmt in ["txt", "pdf", "csv"]:
        fmt_desc = desc
        if fmt == "pdf":
            # str.replace returns a new string — the original discarded the
            # result, so the PDF description was never LaTeX-formatted.
            fmt_desc = fmt_desc.replace("1mM", "$1$m\\textsc{M}")
            fmt_desc = fmt_desc.replace("d6", "\\emph{d}$_6$")
        estimator.write_result(
            path="result/artemisinin_result", fmt=fmt, description=fmt_desc,
            force_overwrite=True,
        )
    np.savetxt("result/errors.txt", estimator.get_errors())
    np.savetxt('result/parameters.txt', estimator.get_result())
def plot():
    """Load the pickled MPM and NLP estimators and build the final figure.

    Produces three stacked panels: a) the raw spectrum; b) and c) the
    individual oscillators, summed model and residual for each estimation
    stage. Saves the figure as artemisinin.png.

    Raises
    ------
    IOError
        If the pickled estimator files under ./result cannot be loaded.
    """
    try:
        estimators = [
            Estimator.from_pickle(path="result/mpm"),
            Estimator.from_pickle(path="result/nlp"),
        ]
    except Exception as exc:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; chain the cause so the real failure is visible.
        raise IOError("Couldn't find pickled estimator files") from exc
    # Colors of each oscillator
    col_indices = 2 * [4 * [0] + 4 * [1] + 4 * [2] + 4 * [3]]
    # List of plots
    # Info will be extracted from these to construct customised plots
    plots = []
    for inds, est in zip(col_indices, estimators):
        cols = [COLORS[i] for i in inds]
        # Prevent error for estimators which have only had MPM run
        est._saveable = True
        plots.append(est.plot_result(data_color='k', oscillator_colors=cols))
    shifts = plots[0].lines['data'].get_xdata()
    spectrum = plots[0].lines['data'].get_ydata()
    # --- Construct figure ---------------------------------------------------
    fig = plt.figure(figsize=FIGSIZE)
    xlims = 3 * [(shifts[0], shifts[-1])]
    # Determine axes geometries: each panel's height is proportional to its
    # y-range so intensities are visually comparable across panels.
    lefts = 3 * [L]
    widths = 3 * [(R - L)]
    spans = [abs(YLIMS[i][1] - YLIMS[i][0]) for i in range(0, 3)]
    heights = [s / sum(spans) * (T - B) for s in spans]
    bottoms = [B + sum(heights[i:]) for i in range(1, 4)]
    dims = [[l, b, w, h] for l, b, w, h in zip(lefts, bottoms, widths, heights)]
    # Create axes a), b) and c)
    axs = []
    for i, (dim, xl, yl, xtks) in enumerate(zip(dims, xlims, YLIMS, XTICKS)):
        axs.append(fig.add_axes(dim))
        ax = axs[-1]
        # labels for each panel
        # `trans` ensures x is in axes coords and y is in figure coords
        trans = transforms.blended_transform_factory(
            axs[-1].transAxes, fig.transFigure,
        )
        ax.text(
            0.01, dim[1] + dim[3] - 0.025, f'{chr(i + 97)})',
            transform=trans, fontsize=10, weight='bold'
        )
        # Set limits
        ax.set_xlim(xl)
        ax.set_ylim(yl)
        ax.set_xticks(xtks)
        if i == 2:
            ax.set_xlabel('$^1$H (ppm)')
        else:
            ax.set_xticklabels([])
    # Plot original data in panel a)
    axs[0].plot(shifts, spectrum, color='k')
    # Extract oscillator lines and labels
    lines = []
    labels = []
    for p in plots:
        # Strip data plot residual plots from lines (first and last elements)
        lines.append([line for line in p.lines.values()][1:-1])
        labels.append([lab for lab in p.labels.values()])
    # Loop over b), c)
    # These axes will show individual oscillators
    osc_axes = axs[1:]
    for i, (ax, lns, lbs, dsps) in enumerate(zip(osc_axes, lines, labels, DISPS)):
        model = np.zeros(lns[0].get_xdata().shape)
        for j, (ln, lb) in enumerate(zip(lns, lbs)):
            # Plot oscillator line
            color = ln.get_color()
            x_ln = ln.get_xdata()
            y_ln = ln.get_ydata()
            ax.plot(x_ln, y_ln, color=color)
            model += y_ln
            # Add oscillator text label, nudged by the per-oscillator offset
            x_lb, y_lb = lb.get_position()
            txt = lb.get_text()
            d_x, d_y = next(dsps)
            ax.text(x_lb + d_x, y_lb + d_y, txt, color=color,
                    fontsize=LABEL_FS, zorder=200)
        # Plot model and residual, vertically offset from the oscillators
        residual = spectrum - model
        ax.plot(shifts, model + next(MODEL_SHIFTS), color='k')
        ax.plot(shifts, residual - next(RESID_SHIFTS), color='k')
    fig.savefig(
        'artemisinin.png', transparent=False, facecolor='#ffffff',
        dpi=200,
    )
if __name__ == '__main__':
    # Re-run the (slow) estimation only when ESTIMATE is set; otherwise just
    # rebuild the figure from the pickled results.
    if ESTIMATE:
        estimate()
    plot()
|
"""
util_array module. Contains the util_2d, util_3d and transient_2d classes.
These classes encapsulate modflow-style array inputs away
from the individual packages. The end-user should not need to
instantiate these classes directly.
"""
from __future__ import division, print_function
# from future.utils import with_metaclass
import os
import shutil
import copy
import numbers
import numpy as np
from ..utils.binaryfile import BinaryHeader
from ..utils.flopy_io import line_parse
class ArrayFormat(object):
    """
    ArrayFormat class for handling various output format types for both
    MODFLOW and flopy

    Parameters
    ----------
    u2d : Util2d instance
    python : str (optional)
        python-style output format descriptor e.g. {0:15.6e}
    fortran : str (optional)
        fortran style output format descriptor e.g. (2E15.6)

    Attributes
    ----------
    fortran : str
        fortran format output descriptor (e.g. (100G15.6)
    py : str
        python format output descriptor (e.g. "{0:15.6E}")
    numpy : str
        numpy format output descriptor (e.g. "%15.6e")
    npl : int
        number of items per line of output
    width : int
        the width of the formatted numeric output
    decimal : int
        the number of decimal digits in the numeric output
    format : str
        the output format type e.g. I, G, E, etc
    free : bool
        free format flag
    binary : bool
        binary format flag

    Methods
    -------
    get_default_numpy_fmt : (dtype : [int, np.float32])
        a static method to get a default numpy dtype - used for loading
    decode_fortran_descriptor : (fd : str)
        a static method to decode fortran descriptors into npl, format,
        width, decimal.

    See Also
    --------

    Notes
    -----

    Examples
    --------

    """

    def __init__(self, u2d, python=None, fortran=None, array_free_format=None):
        assert isinstance(u2d, Util2d), "ArrayFormat only supports Util2d," + \
                                        "not {0}".format(type(u2d))
        # npl ("numbers per line") defaults to the full row width
        if len(u2d.shape) == 1:
            self._npl_full = u2d.shape[0]
        else:
            self._npl_full = u2d.shape[1]
        self.dtype = u2d.dtype
        self._npl = None
        self._format = None
        self._width = None
        self._decimal = None
        # free-format capability comes from the caller or from the model
        if array_free_format is not None:
            self._freeformat_model = bool(array_free_format)
        else:
            self._freeformat_model = bool(u2d.model.array_free_format)
        self.default_float_width = 15
        self.default_int_width = 10
        self.default_float_format = "E"
        self.default_int_format = "I"
        self.default_float_decimal = 6
        self.default_int_decimal = 0
        self._fmts = ['I', 'G', 'E', 'F']
        self._isbinary = False
        self._isfree = False
        if python is not None and fortran is not None:
            # message fixed: the original concatenation lacked a space
            # ("passedto")
            raise Exception("only one of [python,fortran] can be passed "
                            "to ArrayFormat constructor")
        if python is not None:
            self._parse_python_format(python)
        elif fortran is not None:
            self._parse_fortran_format(fortran)
        else:
            self._set_defaults()

    @property
    def array_free_format(self):
        return bool(self._freeformat_model)

    def _set_defaults(self):
        # NOTE: np.int (a deprecated alias of the builtin int, removed in
        # numpy >= 1.24) has been replaced by int throughout; behavior is
        # unchanged on older numpy versions.
        if self.dtype in [int, np.int32]:
            self._npl = self._npl_full
            self._format = self.default_int_format
            self._width = self.default_int_width
            self._decimal = None
        elif self.dtype in [np.float32, bool]:
            self._npl = self._npl_full
            self._format = self.default_float_format
            self._width = self.default_float_width
            self._decimal = self.default_float_decimal
        else:
            raise Exception("ArrayFormat._set_defaults() error: " +
                            "unsupported dtype: {0}".format(str(self.dtype)))

    def __str__(self):
        s = "ArrayFormat: npl:{0},format:{1},width:{2},decimal{3}" \
            .format(self.npl, self.format, self.width, self.decimal)
        s += ",isfree:{0},isbinary:{1}".format(self._isfree, self._isbinary)
        return s

    @staticmethod
    def get_default_numpy_fmt(dtype):
        """Return the default numpy savetxt-style format for a dtype."""
        if dtype == int:
            return "%10d"
        elif dtype == np.float32:
            return "%15.6E"
        else:
            raise Exception(
                "ArrayFormat.get_default_numpy_fmt(): unrecognized " + \
                "dtype, must be np.int or np.float32")

    @classmethod
    def integer(cls):
        raise NotImplementedError()

    @classmethod
    def float(cls):
        raise NotImplementedError()

    @property
    def binary(self):
        return bool(self._isbinary)

    @property
    def free(self):
        return bool(self._isfree)

    def __eq__(self, other):
        # Supports `fmt == "free"` / `fmt == "binary"` string comparisons;
        # anything else falls back to the default object comparison.
        if isinstance(other, str):
            if other.lower() == "free":
                return self.free
            if other.lower() == "binary":
                return self.binary
            return NotImplemented
        # BUG FIX: the original called super().__eq__ without returning the
        # result, so every non-string comparison evaluated to None.
        return super(ArrayFormat, self).__eq__(other)

    @property
    def npl(self):
        return copy.copy(self._npl)

    @property
    def format(self):
        return copy.copy(self._format)

    @property
    def width(self):
        return copy.copy(self._width)

    @property
    def decimal(self):
        return copy.copy(self._decimal)

    def __setattr__(self, key, value):
        # Validated setters for the format-related pseudo-attributes; any
        # other attribute is stored normally.
        if key == "format":
            value = value.upper()
            assert value.upper() in self._fmts
            if value == 'I':
                assert self.dtype in [int, np.int32]
                self._format = value
                self._decimal = None
            else:
                if value == 'G':
                    # MODFLOW emits G-formatted data as E; normalise here
                    print("'G' format being reset to 'E'")
                    value = 'E'
                self._format = value
                if self.decimal is None:
                    self._decimal = self.default_float_decimal
        elif key == "width":
            width = int(value)
            if self.dtype == np.float32 and width < self.decimal:
                raise Exception("width cannot be less than decimal")
            elif self.dtype == np.float32 and \
                    width < self.default_float_width:
                print("ArrayFormat warning:setting width less " +
                      "than default of {0}".format(self.default_float_width))
            self._width = width
        elif key == "decimal":
            if self.dtype in [int, np.int32]:
                raise Exception("cannot set decimal for integer dtypes")
            else:
                value = int(value)
                if value < self.default_float_decimal:
                    print("ArrayFormat warning: setting decimal " +
                          " less than default of " +
                          "{0}".format(self.default_float_decimal))
                if value < self.decimal:
                    print("ArrayFormat warning: setting decimal " +
                          " less than current value of " +
                          "{0}".format(self.default_float_decimal))
                self._decimal = int(value)
        elif key in ("entries", "entries_per_line", "entires_per_line", "npl"):
            # "entires_per_line" (sic) is retained for backward compatibility
            # with the original misspelled key.
            value = int(value)
            assert value <= self._npl_full, "cannot set npl > shape"
            self._npl = value
        elif key.lower() == "binary":
            value = bool(value)
            if value and self.free:
                # free and binary are mutually exclusive; binary wins
                self._isfree = False
            self._isbinary = value
            self._set_defaults()
        elif key.lower() == "free":
            value = bool(value)
            if value and self.binary:
                # free and binary are mutually exclusive; free wins
                self._isbinary = False
            self._isfree = bool(value)
            self._set_defaults()
        elif key.lower() == "fortran":
            self._parse_fortran_format(value)
        elif key.lower() == "python" or key.lower() == "py":
            self._parse_python_format(value)
        else:
            super(ArrayFormat, self).__setattr__(key, value)

    @property
    def py(self):
        return self._get_python_format()

    def _get_python_format(self):
        """Return (npl, python format string) for this format."""
        if self.format == 'I':
            fmt = 'd'
        else:
            fmt = self.format
        pd = '{0:' + str(self.width)
        if self.decimal is not None:
            pd += '.' + str(self.decimal) + fmt + '}'
        else:
            pd += fmt + '}'
        if self.npl is None:
            if self._isfree:
                return (self._npl_full, pd)
            else:
                raise Exception("ArrayFormat._get_python_format() error: " + \
                                "format is not 'free' and npl is not set")
        return (self.npl, pd)

    def _parse_python_format(self, arg):
        raise NotImplementedError()

    @property
    def fortran(self):
        return self._get_fortran_format()

    def _get_fortran_format(self):
        """Return a fortran descriptor string, e.g. "(10E15.6)"."""
        if self._isfree:
            return "(FREE)"
        if self._isbinary:
            return "(BINARY)"
        fd = '({0:d}{1:s}{2:d}'.format(self.npl, self.format, self.width)
        if self.decimal is not None:
            fd += '.{0:d})'.format(self.decimal)
        else:
            fd += ')'
        return fd

    def _parse_fortran_format(self, arg):
        """Set this instance's state from a fortran format descriptor.

        Parameters
        ----------
        arg : str
            e.g. "(10G15.6)", "(FREE)" or "(BINARY)"
        """
        # strip off any quotes around format string
        npl, fmt, width, decimal = ArrayFormat.decode_fortran_descriptor(arg)
        if isinstance(npl, str):
            if 'FREE' in npl.upper():
                self._set_defaults()
                self._isfree = True
                return
            elif 'BINARY' in npl.upper():
                self._set_defaults()
                self._isbinary = True
                return
        self._npl = int(npl)
        self._format = fmt
        self._width = int(width)
        if decimal is not None:
            self._decimal = int(decimal)

    @property
    def numpy(self):
        return self._get_numpy_format()

    def _get_numpy_format(self):
        return "%{0}{1}.{2}".format(self.width, self.format, self.decimal)

    @staticmethod
    def decode_fortran_descriptor(fd):
        """Decode fortran descriptor

        Parameters
        ----------
        fd : str

        Returns
        -------
        npl, fmt, width, decimal : int, str, int, int
            ('free', None, None, None) or ('binary', None, None, None) for
            the FREE/BINARY pseudo-descriptors.
        """
        # strip off any quotes around format string
        fd = fd.replace("'", "")
        fd = fd.replace('"', '')
        # strip off '(' and ')'
        fd = fd.strip()[1:-1]
        if str('FREE') in str(fd.upper()):
            return 'free', None, None, None
        elif str('BINARY') in str(fd.upper()):
            return 'binary', None, None, None
        if str('.') in str(fd):
            raw = fd.split('.')
            decimal = int(raw[1])
        else:
            raw = [fd]
            decimal = None
        fmts = ['ES', 'EN', 'I', 'G', 'E', 'F']
        raw = raw[0].upper()
        for fmt in fmts:
            if fmt in raw:
                raw = raw.split(fmt)
                # '(F9.0)' will return raw = ['', '9']; int('') raises
                # ValueError, so a missing repeat count defaults to 1
                # (was a bare except, narrowed to the exception that the
                # empty-string case actually raises)
                try:
                    npl = int(raw[0])
                    width = int(raw[1])
                except ValueError:
                    npl = 1
                    width = int(raw[1])
                # G/ES/EN all map to plain E on output
                if fmt == 'G':
                    fmt = 'E'
                elif fmt == 'ES':
                    fmt = 'E'
                elif fmt == 'EN':
                    fmt = 'E'
                return npl, fmt, width, decimal
        raise Exception('Unrecognized format type: ' +
                        str(fd) + ' looking for: ' + str(fmts))
def read1d(f, a):
    """
    Fill the 1d array, a, with the correct number of values. Required in
    case lpf 1d arrays (chani, layvka, etc) extend over more than one line
    """
    needed = a.shape[0]
    tokens = []
    # keep consuming lines until enough values have been collected
    while len(tokens) < needed:
        tokens.extend(line_parse(f.readline()))
    a[:] = np.array(tokens[:needed], dtype=a.dtype)
    return a
def new_u2d(old_util2d, value):
    # Clone a Util2d, carrying over every configuration attribute of the
    # original but replacing its data with `value`.
    # NOTE(review): arguments are passed positionally, so this relies on the
    # Util2d constructor ordering (model, shape, dtype, value, name, fmtin,
    # cnstnt, iprn, ext_filename, locat, bin, ...) — confirm against Util2d.
    new_util2d = Util2d(old_util2d.model, old_util2d.shape, old_util2d.dtype,
                        value, old_util2d.name, old_util2d.format.fortran,
                        old_util2d.cnstnt, old_util2d.iprn,
                        old_util2d.ext_filename, old_util2d.locat,
                        old_util2d.format.binary,
                        array_free_format=old_util2d.format.array_free_format)
    return new_util2d
class Util3d(object):
"""
Util3d class for handling 3-D model arrays. just a thin wrapper around
Util2d
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
shape : length 3 tuple
shape of the 3-D array, typically (nlay,nrow,ncol)
dtype : [np.int,np.float32,np.bool]
the type of the data
value : variable
the data to be assigned to the 3-D array.
can be a scalar, list, or ndarray
name : string
name of the property, used for writing comments to input files
fmtin : string
modflow fmtin variable (optional). (the default is None)
cnstnt : string
modflow cnstnt variable (optional) (the default is 1.0)
iprn : int
modflow iprn variable (optional) (the default is -1)
locat : int
modflow locat variable (optional) (the default is None). If the model
instance does not support free format and the
external flag is not set and the value is a simple scalar,
then locat must be explicitly passed as it is the unit number
to read the array from
ext_filename : string
the external filename to write the array representation to
(optional) (the default is None) .
If type(value) is a string and is an accessible filename, the
ext_filename is reset to value.
bin : bool
flag to control writing external arrays as binary (optional)
        (the default is False)
Attributes
----------
array : np.ndarray
the array representation of the 3-D object
Methods
-------
get_file_entry : string
get the model input file string including the control record for the
entire 3-D property
See Also
--------
Notes
-----
Examples
--------
"""
    def __init__(self, model, shape, dtype, value, name,
                 fmtin=None, cnstnt=1.0, iprn=-1, locat=None,
                 ext_unit_dict=None, array_free_format=None):
        """
        3-D wrapper from Util2d - shape must be 3-D
        """
        self.array_free_format = array_free_format
        # Copy-constructor path: clone another Util3d layer by layer.
        if isinstance(value, Util3d):
            # NOTE(review): this first copies *all* attributes of the source
            # (including its util_2ds list object), then rebuilds util_2ds
            # in place below — the list itself is shared with `value`.
            for attr in value.__dict__.items():
                setattr(self, attr[0], attr[1])
            self.model = model
            self.array_free_format = array_free_format
            for i, u2d in enumerate(self.util_2ds):
                self.util_2ds[i] = Util2d(model, u2d.shape, u2d.dtype,
                                          u2d._array, name=u2d.name,
                                          fmtin=u2d.format.fortran,
                                          locat=locat,
                                          cnstnt=u2d.cnstnt,
                                          ext_filename=u2d.filename,
                                          array_free_format=array_free_format)
            return
        assert len(shape) == 3, 'Util3d:shape attribute must be length 3'
        self.model = model
        self.shape = shape
        self.dtype = dtype
        self.__value = value
        # `name` may be a scalar (replicated per layer) or a per-layer list.
        if isinstance(name, list):
            self.name = name
        else:
            t = []
            for k in range(shape[0]):
                t.append(name)
            self.name = t
        # Per-layer display names; ' Layer ' is appended unless already present.
        self.name_base = []
        for k in range(shape[0]):
            if 'Layer' not in self.name[k]:
                self.name_base.append(self.name[k] + ' Layer ')
            else:
                self.name_base.append(self.name[k])
        self.fmtin = fmtin
        self.cnstnt = cnstnt
        self.iprn = iprn
        self.locat = locat
        # Per-layer external-file name stubs (spaces replaced by underscores).
        self.ext_filename_base = []
        if model.external_path is not None:
            for k in range(shape[0]):
                self.ext_filename_base. \
                    append(os.path.join(model.external_path,
                                        self.name_base[k].replace(' ', '_')))
        else:
            for k in range(shape[0]):
                self.ext_filename_base. \
                    append(self.name_base[k].replace(' ', '_'))
        self.util_2ds = self.build_2d_instances()
def __setitem__(self, k, value):
if isinstance(k, int):
assert k in range(0, self.shape[
0]), "Util3d error: k not in range nlay"
self.util_2ds[k] = new_u2d(self.util_2ds[k], value)
else:
raise NotImplementedError(
"Util3d doesn't support setitem indices" + str(k))
    def __setattr__(self, key, value):
        # Intercept a few attributes so they propagate to every layer's Util2d
        # (only once util_2ds exists, i.e. after construction).
        if hasattr(self, "util_2ds") and key == "cnstnt":
            # set the cnstnt for each u2d
            # NOTE(review): self.cnstnt itself is NOT updated here — only the
            # per-layer Util2d objects; confirm this asymmetry is intended.
            for u2d in self.util_2ds:
                u2d.cnstnt = value
        elif hasattr(self, "util_2ds") and key == "fmtin":
            # rebuild each layer's format from the fortran descriptor, and
            # also store fmtin on the Util3d itself
            for u2d in self.util_2ds:
                u2d.format = ArrayFormat(u2d, fortran=value,
                                         array_free_format=self.array_free_format)
            super(Util3d, self).__setattr__("fmtin", value)
        elif hasattr(self, "util_2ds") and key == "how":
            # NOTE(review): like cnstnt, "how" is only pushed to the layers.
            for u2d in self.util_2ds:
                u2d.how = value
        else:
            # set the attribute for u3d
            super(Util3d, self).__setattr__(key, value)
    def export(self, f, **kwargs):
        # Delegate to flopy's shared export helper for 3-D utils; imported
        # locally to avoid a circular import at module load time.
        from flopy import export
        return export.utils.util3d_helper(f, self, **kwargs)
    def to_shapefile(self, filename):
        """
        Export 3-D model data to shapefile (polygons). Adds an
        attribute for each Util2d in self.u2ds

        Deprecated: emits a warning and delegates to export().

        Parameters
        ----------
        filename : str
            Shapefile name to write

        Returns
        ----------
        None

        See Also
        --------

        Notes
        -----

        Examples
        --------
        >>> import flopy
        >>> ml = flopy.modflow.Modflow.load('test.nam')
        >>> ml.lpf.hk.to_shapefile('test_hk.shp')
        """
        import warnings
        warnings.warn(
            "Deprecation warning: to_shapefile() is deprecated. use .export()")
        self.export(filename)
    def plot(self, filename_base=None, file_extension=None, mflay=None,
             fignum=None, **kwargs):
        """
        Plot 3-D model input data

        Parameters
        ----------
        filename_base : str
            Base file name that will be used to automatically generate file
            names for output image files. Plots will be exported as image
            files if file_name_base is not None. (default is None)
        file_extension : str
            Valid matplotlib.pyplot file extension for savefig(). Only used
            if filename_base is not None. (default is 'png')
        mflay : int
            MODFLOW zero-based layer number to return. If None, then all
            all layers will be included. (default is None)
        fignum : int or list of int
            Figure number(s) forwarded to the plot helper. (default is None)
        **kwargs : dict
            axes : list of matplotlib.pyplot.axis
                List of matplotlib.pyplot.axis that will be used to plot
                data for each layer. If axes=None axes will be generated.
                (default is None)
            pcolor : bool
                Boolean used to determine if matplotlib.pyplot.pcolormesh
                plot will be plotted. (default is True)
            colorbar : bool
                Boolean used to determine if a color bar will be added to
                the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
                (default is False)
            inactive : bool
                Boolean used to determine if a black overlay in inactive
                cells in a layer will be displayed. (default is True)
            contour : bool
                Boolean used to determine if matplotlib.pyplot.contour
                plot will be plotted. (default is False)
            clabel : bool
                Boolean used to determine if matplotlib.pyplot.clabel
                will be plotted. Only used if contour=True. (default is False)
            grid : bool
                Boolean used to determine if the model grid will be plotted
                on the figure. (default is False)
            masked_values : list
                List of unique values to be excluded from the plot.

        Returns
        ----------
        out : list
            Empty list is returned if filename_base is not None. Otherwise
            a list of matplotlib.pyplot.axis is returned.

        See Also
        --------

        Notes
        -----

        Examples
        --------
        >>> import flopy
        >>> ml = flopy.modflow.Modflow.load('test.nam')
        >>> ml.lpf.hk.plot()
        """
        import flopy.plot.plotutil as pu
        if file_extension is not None:
            fext = file_extension
        else:
            fext = 'png'
        # one title per layer
        names = ['{} layer {}'.format(self.name[k], k + 1) for k in
                 range(self.shape[0])]
        filenames = None
        if filename_base is not None:
            if mflay is not None:
                # plot only the requested layer, clamped to the last layer
                i0 = int(mflay)
                if i0 + 1 >= self.shape[0]:
                    i0 = self.shape[0] - 1
                i1 = i0 + 1
            else:
                i0 = 0
                i1 = self.shape[0]
            # build filenames
            filenames = ['{}_{}_Layer{}.{}'.format(filename_base, self.name[k],
                                                   k + 1, fext) for k in
                         range(i0, i1)]
        return pu._plot_array_helper(self.array, self.model,
                                     names=names, filenames=filenames,
                                     mflay=mflay, fignum=fignum, **kwargs)
def __getitem__(self, k):
if (isinstance(k, int) or
np.issubdtype(getattr(k, 'dtype', None), np.integer)):
return self.util_2ds[k]
elif len(k) == 3:
return self.array[k[0], k[1], k[2]]
else:
raise Exception("Util3d error: unsupported indices:" + str(k))
def get_file_entry(self):
s = ''
for u2d in self.util_2ds:
s += u2d.get_file_entry()
return s
def get_value(self):
value = []
for u2d in self.util_2ds:
value.append(u2d.get_value())
return value
    @property
    def array(self):
        '''
        Return a numpy array of the 3D shape. If an unstructured model, then
        return an array of size nodes.
        '''
        nlay, nrow, ncol = self.shape
        if nrow is not None:
            # typical 3D case
            a = np.empty((self.shape), dtype=self.dtype)
            for i, u2d in enumerate(self.util_2ds):
                a[i] = u2d.array
        else:
            # unstructured case: nrow is None and ncol holds the node count
            # per layer; flatten all layers into one 1-D vector of `nodes`
            nodes = ncol.sum()
            a = np.empty((nodes), dtype=self.dtype)
            istart = 0
            for i, u2d in enumerate(self.util_2ds):
                istop = istart + ncol[i]
                a[istart:istop] = u2d.array
                istart = istop
        return a
    def build_2d_instances(self):
        """Construct one Util2d instance per layer from the raw stored value.

        Accepts a scalar (broadcast to every layer), a list or 1-D array with
        one entry per layer (entries may themselves be Util2d instances), a
        2-D ndarray (tiled to every layer), or a 3-D ndarray (one slice per
        layer).  Returns the list of Util2d instances.
        """
        u2ds = []
        # if value is not enumerable, then make a list of something
        if not isinstance(self.__value, list) \
                and not isinstance(self.__value, np.ndarray):
            self.__value = [self.__value] * self.shape[0]

        # if this is a list or 1-D array with constant values per layer
        if isinstance(self.__value, list) \
                or (isinstance(self.__value, np.ndarray)
                    and (self.__value.ndim == 1)):
            assert len(self.__value) == self.shape[0], \
                'length of 3d enumerable:' + str(len(self.__value)) + \
                ' != to shape[0]:' + str(self.shape[0])
            for i, item in enumerate(self.__value):
                if isinstance(item, Util2d):
                    # we need to reset the external name because most of the
                    # load() methods don't use layer-specific names
                    item._ext_filename = self.ext_filename_base[i] + \
                                         "{0}.ref".format(i + 1)
                    # reset the model instance in cases these Util2d's
                    # came from another model instance
                    item.model = self.model
                    u2ds.append(item)
                else:
                    # build a fresh Util2d for this layer from the raw item
                    name = self.name_base[i] + str(i + 1)
                    ext_filename = None
                    if self.model.external_path is not None:
                        ext_filename = self.ext_filename_base[i] + str(i + 1) + \
                                       '.ref'
                    shp = self.shape[1:]
                    if shp[0] is None:
                        # allow for unstructured so that ncol changes by layer
                        shp = (1, self.shape[2][i])
                    u2d = Util2d(self.model, shp, self.dtype, item,
                                 fmtin=self.fmtin, name=name,
                                 ext_filename=ext_filename,
                                 locat=self.locat,
                                 array_free_format=self.array_free_format)
                    u2ds.append(u2d)

        elif isinstance(self.__value, np.ndarray):
            # if an array of shape nrow,ncol was passed, tile it out for each layer
            if self.__value.shape[0] != self.shape[0]:
                if self.__value.shape == (self.shape[1], self.shape[2]):
                    self.__value = [self.__value] * self.shape[0]
                else:
                    raise Exception('value shape[0] != to self.shape[0] and' +
                                    'value.shape[[1,2]] != self.shape[[1,2]]' +
                                    str(self.__value.shape) + ' ' + str(
                        self.shape))
            # 3-D ndarray: one 2-D slice per layer
            for i, a in enumerate(self.__value):
                a = np.atleast_2d(a)
                ext_filename = None
                name = self.name_base[i] + str(i + 1)
                if self.model.external_path is not None:
                    ext_filename = self.ext_filename_base[i] + str(
                        i + 1) + '.ref'
                u2d = Util2d(self.model, self.shape[1:], self.dtype, a,
                             fmtin=self.fmtin, name=name,
                             ext_filename=ext_filename,
                             locat=self.locat,
                             array_free_format=self.array_free_format)
                u2ds.append(u2d)

        else:
            raise Exception('util_array_3d: value attribute must be list ' +
                            ' or ndarray, not' + str(type(self.__value)))
        return u2ds
@staticmethod
def load(f_handle, model, shape, dtype, name, ext_unit_dict=None,
array_format=None):
assert len(shape) == 3, 'Util3d:shape attribute must be length 3'
nlay, nrow, ncol = shape
u2ds = []
for k in range(nlay):
u2d_name = name + '_Layer_{0}'.format(k)
if nrow is None:
nr = 1
nc = ncol[k]
else:
nr = nrow
nc = ncol
u2d = Util2d.load(f_handle, model, (nr, nc), dtype, u2d_name,
ext_unit_dict=ext_unit_dict,
array_format=array_format)
u2ds.append(u2d)
u3d = Util3d(model, shape, dtype, u2ds, name)
return u3d
def __mul__(self, other):
if np.isscalar(other):
new_u2ds = []
for u2d in self.util_2ds:
new_u2ds.append(u2d * other)
return Util3d(self.model, self.shape, self.dtype, new_u2ds,
self.name, self.fmtin, self.cnstnt, self.iprn,
self.locat)
elif isinstance(other, list):
assert len(other) == self.shape[0]
new_u2ds = []
for u2d, item in zip(self.util_2ds, other):
new_u2ds.append(u2d * item)
return Util3d(self.model, self.shape, self.dtype, new_u2ds,
self.name, self.fmtin, self.cnstnt, self.iprn,
self.locat)
class Transient3d(object):
    """
    Transient3d class for handling time-dependent 3-D model arrays.
    just a thin wrapper around Util3d

    Parameters
    ----------
    model : model object
        The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
        this package will be added.
    shape : length 3 tuple
        shape of the 3-D transient arrays, typically (nlay,nrow,ncol)
    dtype : [np.int,np.float32,np.bool]
        the type of the data
    value : variable
        the data to be assigned to the 3-D arrays. Typically a dict
        of {kper:value}, where kper is the zero-based stress period
        to assign a value to. Value should be cast-able to Util2d instance
        can be a scalar, list, or ndarray if the array value is constant in
        time.
    name : string
        name of the property, used for writing comments to input files and
        for forming external files names (if needed)
    fmtin : string
        modflow fmtin variable (optional). (the default is None)
    cnstnt : string
        modflow cnstnt variable (optional) (the default is 1.0)
    iprn : int
        modflow iprn variable (optional) (the default is -1)
    locat : int
        modflow locat variable (optional) (the default is None). If the model
        instance does not support free format and the
        external flag is not set and the value is a simple scalar,
        then locat must be explicitly passed as it is the unit number
        to read the array from
    ext_filename : string
        the external filename to write the array representation to
        (optional) (the default is None) .
        If type(value) is a string and is an accessible filename,
        the ext_filename is reset to value.
    bin : bool
        flag to control writing external arrays as binary (optional)
        (the default is False)

    Attributes
    ----------
    transient_3ds : dict{kper:Util3d}
        the transient sequence of Util3d objects

    Methods
    -------
    get_kper_entry : (itmp,string)
        get the itmp value and the Util3d file entry of the value in
        transient_3ds in bin kper. if kper < min(transient_3ds.keys()),
        return (1,zero_entry<Util3d>). If kper > min(transient_3ds.keys()),
        but is not found in transient_3ds.keys(), return (-1,'')

    See Also
    --------

    Notes
    -----

    Examples
    --------

    """

    def __init__(self, model, shape, dtype, value, name, fmtin=None,
                 cnstnt=1.0, iprn=-1, ext_filename=None, locat=None,
                 bin=False, array_free_format=None):
        if isinstance(value, Transient3d):
            # copy-constructor: clone attributes from the existing instance
            for attr in value.__dict__.items():
                setattr(self, attr[0], attr[1])
            self.model = model
            return

        self.model = model
        assert len(shape) == 3, "Transient3d error: shape arg must be " + \
                                "length three (nlay, nrow, ncol), not " + \
                                str(shape)
        self.shape = shape
        self.dtype = dtype
        self.__value = value
        self.name_base = name
        self.fmtin = fmtin
        self.cnstnt = cnstnt
        # the misspelled attribute name is kept for backward compatibility
        # with any external code that may have referenced it
        self.cnstst = cnstnt
        self.iprn = iprn
        self.locat = locat
        self.array_free_format = array_free_format
        self.transient_3ds = self.build_transient_sequence()
        return

    def __setattr__(self, key, value):
        # no attribute interception needed here (unlike Transient2d);
        # plain passthrough to object.__setattr__
        super(Transient3d, self).__setattr__(key, value)

    def get_zero_3d(self, kper):
        """Return a zero-filled Util3d for stress period kper."""
        name = self.name_base + str(kper + 1) + '(filled zero)'
        return Util3d(self.model, self.shape,
                      self.dtype, 0.0, name=name,
                      array_free_format=self.array_free_format)

    def __getitem__(self, kper):
        """Return the Util3d for kper: the exact entry if present, zeros
        before the first specified period, otherwise the most recent
        previously-specified entry."""
        if kper in list(self.transient_3ds.keys()):
            return self.transient_3ds[kper]
        elif kper < min(self.transient_3ds.keys()):
            return self.get_zero_3d(kper)
        else:
            for i in range(kper, -1, -1):
                if i in list(self.transient_3ds.keys()):
                    return self.transient_3ds[i]
            raise Exception("Transient3d.__getitem__(): error:" +
                            " could not find an entry before kper {0:d}".format(
                                kper))

    def __setitem__(self, key, value):
        """Assign value (cast to a Util3d) to stress period key."""
        try:
            key = int(key)
        except Exception as e:
            raise Exception("Transient3d.__setitem__() error: " +
                            "'key'could not be cast to int:{0}".format(str(e)))
        nper = self.model.nper
        if key > self.model.nper or key < 0:
            raise Exception("Transient3d.__setitem__() error: " +
                            "key {0} not in nper range {1}:{2}".format(key, 0,
                                                                       nper))

        self.transient_3ds[key] = self.__get_3d_instance(key, value)

    @property
    def array(self):
        """A (nper, nlay, nrow, ncol) array covering every stress period."""
        arr = np.zeros((self.model.nper, self.shape[0], self.shape[1],
                        self.shape[2]), dtype=self.dtype)
        for kper in range(self.model.nper):
            u3d = self[kper]
            for k in range(self.shape[0]):
                arr[kper, k, :, :] = u3d[k].array
        return arr

    def get_kper_entry(self, kper):
        """
        get the file entry info for a given kper
        returns (itmp,file entry string from Util3d)
        """
        if kper in self.transient_3ds:
            s = ''
            for k in range(self.shape[0]):
                s += self.transient_3ds[kper][k].get_file_entry()
            return 1, s
        elif kper < min(self.transient_3ds.keys()):
            # bug fix: index the zero-filled Util3d itself; the previous code
            # indexed the already-built file-entry *string*, which would have
            # raised AttributeError on this path
            u3d = self.get_zero_3d(kper)
            s = ''
            for k in range(self.shape[0]):
                s += u3d[k].get_file_entry()
            return 1, s
        else:
            return -1, ''

    def build_transient_sequence(self):
        """
        parse self.__value into a dict{kper:Util3d}
        """
        # a dict keyed on kper (zero-based)
        if isinstance(self.__value, dict):
            tran_seq = {}
            for key, val in self.__value.items():
                try:
                    key = int(key)
                except Exception:
                    raise Exception("Transient3d error: can't cast key: " +
                                    str(key) + " to kper integer")
                if key < 0:
                    raise Exception("Transient3d error: key can't be " +
                                    " negative: " + str(key))
                try:
                    u3d = self.__get_3d_instance(key, val)
                except Exception as e:
                    raise Exception("Transient3d error building Util3d " +
                                    " instance from value at kper: " +
                                    str(key) + "\n" + str(e))
                tran_seq[key] = u3d
            return tran_seq

        # these are all for single entries - use the same Util3d for all kper
        # an array
        elif isinstance(self.__value, np.ndarray):
            return {0: self.__get_3d_instance(0, self.__value)}

        # a filename
        elif isinstance(self.__value, str):
            return {0: self.__get_3d_instance(0, self.__value)}

        # a scalar
        elif np.isscalar(self.__value):
            return {0: self.__get_3d_instance(0, self.__value)}

        # lists aren't allowed
        elif isinstance(self.__value, list):
            raise Exception("Transient3d error: value cannot be a list " +
                            "anymore. try a dict{kper,value}")

        else:
            raise Exception("Transient3d error: value type not " +
                            " recognized: " + str(type(self.__value)))

    def __get_3d_instance(self, kper, arg):
        """
        parse an argument into a Util3d instance
        """
        name = '{}_period{}'.format(self.name_base, kper + 1)
        return Util3d(self.model, self.shape, self.dtype, arg,
                      fmtin=self.fmtin, name=name,
                      locat=self.locat,
                      array_free_format=self.array_free_format)
class Transient2d(object):
    """
    Transient2d class for handling time-dependent 2-D model arrays.
    just a thin wrapper around Util2d

    Parameters
    ----------
    model : model object
        The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
        this package will be added.
    shape : length 2 tuple
        shape of the 2-D transient arrays, typically (nrow,ncol)
    dtype : [np.int,np.float32,np.bool]
        the type of the data
    value : variable
        the data to be assigned to the 2-D arrays. Typically a dict
        of {kper:value}, where kper is the zero-based stress period
        to assign a value to. Value should be cast-able to Util2d instance
        can be a scalar, list, or ndarray if the array value is constant in
        time.
    name : string
        name of the property, used for writing comments to input files and
        for forming external files names (if needed)
    fmtin : string
        modflow fmtin variable (optional). (the default is None)
    cnstnt : string
        modflow cnstnt variable (optional) (the default is 1.0)
    iprn : int
        modflow iprn variable (optional) (the default is -1)
    locat : int
        modflow locat variable (optional) (the default is None). If the model
        instance does not support free format and the
        external flag is not set and the value is a simple scalar,
        then locat must be explicitly passed as it is the unit number
        to read the array from
    ext_filename : string
        the external filename to write the array representation to
        (optional) (the default is None) .
        If type(value) is a string and is an accessible filename,
        the ext_filename is reset to value.
    bin : bool
        flag to control writing external arrays as binary (optional)
        (the default is False)

    Attributes
    ----------
    transient_2ds : dict{kper:Util2d}
        the transient sequence of Util2d objects

    Methods
    -------
    get_kper_entry : (itmp,string)
        get the itmp value and the Util2d file entry of the value in
        transient_2ds in bin kper. if kper < min(transient_2ds.keys()),
        return (1,zero_entry<Util2d>). If kper > min(transient_2ds.keys()),
        but is not found in transient_2ds.keys(), return (-1,'')

    See Also
    --------

    Notes
    -----

    Examples
    --------

    """

    def __init__(self, model, shape, dtype, value, name, fmtin=None,
                 cnstnt=1.0, iprn=-1, ext_filename=None, locat=None,
                 bin=False,array_free_format=None):
        # copy-constructor: rebuild each Util2d against the new model /
        # format settings, then stop
        if isinstance(value, Transient2d):
            for attr in value.__dict__.items():
                setattr(self, attr[0], attr[1])
            for kper, u2d in self.transient_2ds.items():
                self.transient_2ds[kper] = Util2d(model, u2d.shape, u2d.dtype,
                                                  u2d._array, name=u2d.name,
                                                  fmtin=u2d.format.fortran,
                                                  locat=locat,
                                                  cnstnt=u2d.cnstnt,
                                                  ext_filename=u2d.filename,
                                                  array_free_format=array_free_format)
            self.model = model
            return

        self.model = model
        assert len(shape) == 2, "Transient2d error: shape arg must be " + \
                                "length two (nrow, ncol), not " + \
                                str(shape)

        if shape[0] is None:
            # allow for unstructured so that ncol changes by layer
            shape = (1, shape[1][0])

        self.shape = shape
        self.dtype = dtype
        self.__value = value
        self.name_base = name
        self.fmtin = fmtin
        # NOTE(review): attribute name is a misspelling of "cnstnt" --
        # kept as-is since external code may reference it
        self.cnstst = cnstnt
        self.iprn = iprn
        self.locat = locat
        self.array_free_format = array_free_format
        if model.external_path is not None:
            self.ext_filename_base = \
                os.path.join(model.external_path,
                             self.name_base.replace(' ', '_'))
        else:
            self.ext_filename_base = self.name_base.replace(' ', '_')
        self.transient_2ds = self.build_transient_sequence()
        return

    @staticmethod
    def masked4d_array_to_kper_dict(m4d):
        """Convert a (nper, 1, nrow, ncol) array into a {kper: 2-D array}
        dict, skipping stress periods that are entirely NaN."""
        assert m4d.ndim == 4
        kper_dict = {}
        for kper, arr in enumerate(m4d):
            if np.all(np.isnan(arr)):
                # fully-masked period: leave this kper out of the dict
                continue
            elif np.any(np.isnan(arr)):
                # partially-masked periods are ambiguous, so refuse them
                raise Exception("masked value found in array")
            kper_dict[kper] = arr.copy()
        return kper_dict

    @classmethod
    def from_4d(cls, model, pak_name, m4ds):
        """construct a Transient2d instance from a
        dict(name: (masked) 4d numpy.ndarray
        Parameters
        ----------
            model : flopy.mbase derived type
            pak_name : str package name (e.g. RCH)
            m4ds : dict(name,(masked) 4d numpy.ndarray)
            each ndarray must have shape (nper,1,nrow,ncol).
            if an entire (nrow,ncol) slice is np.NaN, then
            that kper is skipped.
        Returns
        -------
            Transient2d instance
        """

        assert isinstance(m4ds, dict)
        keys = list(m4ds.keys())
        assert len(keys) == 1
        name = keys[0]
        m4d = m4ds[name]

        # validate the expected (nper, 1, nrow, ncol) shape
        assert m4d.ndim == 4
        assert m4d.shape[0] == model.nper
        assert m4d.shape[1] == 1
        assert m4d.shape[2] == model.nrow
        assert m4d.shape[3] == model.ncol
        m4d = m4d.astype(np.float32)
        kper_dict = Transient2d.masked4d_array_to_kper_dict(m4d)
        return cls(model=model, shape=(model.nrow, model.ncol),
                   value=kper_dict,
                   dtype=m4d.dtype.type, name=name)

    def __setattr__(self, key, value):
        # intercept a few attributes and fan them out to every Util2d in
        # the transient sequence; everything else also falls through to
        # object.__setattr__ below
        if hasattr(self, "transient_2ds") and key == "cnstnt":
            # set cnstnt for each u2d
            for kper, u2d in self.transient_2ds.items():
                self.transient_2ds[kper].cnstnt = value
        elif hasattr(self, "transient_2ds") and key == "fmtin":
            # set fmtin for each u2d
            for kper, u2d in self.transient_2ds.items():
                self.transient_2ds[kper].format = ArrayFormat(u2d,
                                                              fortran=value)
        elif hasattr(self, "transient_2ds") and key == "how":
            # set how for each u2d
            for kper, u2d in self.transient_2ds.items():
                self.transient_2ds[kper].how = value
        # set the attribute for u3d, even for cnstnt
        super(Transient2d, self).__setattr__(key, value)

    def get_zero_2d(self, kper):
        """Return a zero-filled Util2d for stress period kper."""
        name = self.name_base + str(kper + 1) + '(filled zero)'
        return Util2d(self.model, self.shape,
                      self.dtype, 0.0, name=name,
                      array_free_format=self.array_free_format)

    def to_shapefile(self, filename):
        """
        Export transient 2D data to a shapefile (as polygons). Adds an
                attribute for each unique Util2d instance in self.data

        Parameters
        ----------
        filename : str
            Shapefile name to write

        Returns
        ----------
        None

        See Also
        --------

        Notes
        -----

        Examples
        --------
        >>> import flopy
        >>> ml = flopy.modflow.Modflow.load('test.nam')
        >>> ml.rch.rech.as_shapefile('test_rech.shp')
        """
        import warnings
        warnings.warn(
            "Deprecation warning: to_shapefile() is deprecated. use .export()")
        # from flopy.utils.flopy_io import write_grid_shapefile, shape_attr_name
        #
        # array_dict = {}
        # for kper in range(self.model.nper):
        #     u2d = self[kper]
        #     name = '{}_{:03d}'.format(shape_attr_name(u2d.name), kper + 1)
        #     array_dict[name] = u2d.array
        # write_grid_shapefile(filename, self.model.dis.sr, array_dict)
        self.export(filename)

    def plot(self, filename_base=None, file_extension=None, **kwargs):
        """
        Plot transient 2-D model input data

        Parameters
        ----------
        filename_base : str
            Base file name that will be used to automatically generate file
            names for output image files. Plots will be exported as image
            files if file_name_base is not None. (default is None)
        file_extension : str
            Valid matplotlib.pyplot file extension for savefig(). Only used
            if filename_base is not None. (default is 'png')
        **kwargs : dict
            axes : list of matplotlib.pyplot.axis
                List of matplotlib.pyplot.axis that will be used to plot
                data for each layer. If axes=None axes will be generated.
                (default is None)
            pcolor : bool
                Boolean used to determine if matplotlib.pyplot.pcolormesh
                plot will be plotted. (default is True)
            colorbar : bool
                Boolean used to determine if a color bar will be added to
                the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
                (default is False)
            inactive : bool
                Boolean used to determine if a black overlay in inactive
                cells in a layer will be displayed. (default is True)
            contour : bool
                Boolean used to determine if matplotlib.pyplot.contour
                plot will be plotted. (default is False)
            clabel : bool
                Boolean used to determine if matplotlib.pyplot.clabel
                will be plotted. Only used if contour=True. (default is False)
            grid : bool
                Boolean used to determine if the model grid will be plotted
                on the figure. (default is False)
            masked_values : list
                List of unique values to be excluded from the plot.
            kper : str
                MODFLOW zero-based stress period number to return. If
                kper='all' then data for all stress period will be
                extracted. (default is zero).

        Returns
        ----------
        out : list
            Empty list is returned if filename_base is not None. Otherwise
            a list of matplotlib.pyplot.axis is returned.

        See Also
        --------

        Notes
        -----

        Examples
        --------
        >>> import flopy
        >>> ml = flopy.modflow.Modflow.load('test.nam')
        >>> ml.rch.rech.plot()
        """
        import flopy.plot.plotutil as pu
        if file_extension is not None:
            fext = file_extension
        else:
            fext = 'png'
        # kper may be an int (single period) or the string 'all'
        if 'kper' in kwargs:
            kk = kwargs['kper']
            kwargs.pop('kper')
            try:
                kk = kk.lower()
                if kk == 'all':
                    k0 = 0
                    k1 = self.model.nper
                else:
                    k0 = 0
                    k1 = 1
            except:
                # kk has no .lower(): treat it as an integer kper
                k0 = int(kk)
                k1 = k0 + 1
            # if kwargs['kper'] == 'all':
            #     kwargs.pop('kper')
            #     k0 = 0
            #     k1 = self.model.nper
            # else:
            #     k0 = int(kwargs.pop('kper'))
            #     k1 = k0 + 1
        else:
            k0 = 0
            k1 = 1

        if 'fignum' in kwargs:
            fignum = kwargs.pop('fignum')
        else:
            fignum = list(range(k0, k1))

        # mflay does not apply to 2-D data; drop it if passed
        if 'mflay' in kwargs:
            kwargs.pop('mflay')

        axes = []
        for idx, kper in enumerate(range(k0, k1)):
            title = '{} stress period {:d}'. \
                format(self.name_base.replace('_', '').upper(),
                       kper + 1)
            if filename_base is not None:
                filename = filename_base + '_{:05d}.{}'.format(kper + 1, fext)
            else:
                filename = None
            axes.append(pu._plot_array_helper(self[kper].array, self.model,
                                              names=title, filenames=filename,
                                              fignum=fignum[idx], **kwargs))
        return axes

    def __getitem__(self, kper):
        """Return the Util2d for kper: the exact entry if present, zeros
        before the first specified period, otherwise the most recent
        previously-specified entry."""
        if kper in list(self.transient_2ds.keys()):
            return self.transient_2ds[kper]
        elif kper < min(self.transient_2ds.keys()):
            return self.get_zero_2d(kper)
        else:
            for i in range(kper, -1, -1):
                if i in list(self.transient_2ds.keys()):
                    return self.transient_2ds[i]
            raise Exception("Transient2d.__getitem__(): error:" + \
                            " could not find an entry before kper {0:d}".format(
                                kper))

    def __setitem__(self, key, value):
        """Assign value (cast to a Util2d) to stress period key."""
        try:
            key = int(key)
        except Exception as e:
            raise Exception("Transient2d.__setitem__() error: " + \
                            "'key'could not be cast to int:{0}".format(str(e)))
        nper = self.model.nper
        if key > self.model.nper or key < 0:
            raise Exception("Transient2d.__setitem__() error: " + \
                            "key {0} not in nper range {1}:{2}".format(key, 0,
                                                                       nper))

        self.transient_2ds[key] = self.__get_2d_instance(key, value)

    @property
    def array(self):
        # 4-D (nper, 1, nrow, ncol) array covering every stress period
        arr = np.zeros((self.model.nper, 1, self.shape[0], self.shape[1]),
                       dtype=self.dtype)
        for kper in range(self.model.nper):
            u2d = self[kper]
            arr[kper, 0, :, :] = u2d.array
        return arr

    def export(self, f, **kwargs):
        """Export via flopy's transient2d export helper."""
        from flopy import export
        return export.utils.transient2d_helper(f, self, **kwargs)

    def get_kper_entry(self, kper):
        """
        get the file entry info for a given kper
        returns (itmp,file entry string from Util2d)
        """
        if kper in self.transient_2ds:
            return (1, self.transient_2ds[kper].get_file_entry())
        elif kper < min(self.transient_2ds.keys()):
            # before the first specified period: write a zero-filled array
            return (1, self.get_zero_2d(kper).get_file_entry())
        else:
            # reuse the previous period's array (MODFLOW itmp = -1)
            return (-1, '')

    def build_transient_sequence(self):
        """
        parse self.__value into a dict{kper:Util2d}
        """

        # a dict keyed on kper (zero-based)
        if isinstance(self.__value, dict):
            tran_seq = {}
            for key, val in self.__value.items():
                try:
                    key = int(key)
                except:
                    raise Exception("Transient2d error: can't cast key: " +
                                    str(key) + " to kper integer")
                if key < 0:
                    raise Exception("Transient2d error: key can't be " +
                                    " negative: " + str(key))
                try:
                    u2d = self.__get_2d_instance(key, val)
                except Exception as e:
                    raise Exception("Transient2d error building Util2d " +
                                    " instance from value at kper: " +
                                    str(key) + "\n" + str(e))
                tran_seq[key] = u2d
            return tran_seq

        # these are all for single entries - use the same Util2d for all kper
        # an array of shape (nrow,ncol)
        elif isinstance(self.__value, np.ndarray):
            return {0: self.__get_2d_instance(0, self.__value)}

        # a filename
        elif isinstance(self.__value, str):
            return {0: self.__get_2d_instance(0, self.__value)}

        # a scalar
        elif np.isscalar(self.__value):
            return {0: self.__get_2d_instance(0, self.__value)}

        # lists aren't allowed
        elif isinstance(self.__value, list):
            raise Exception("Transient2d error: value cannot be a list " +
                            "anymore. try a dict{kper,value}")

        else:
            raise Exception("Transient2d error: value type not " +
                            " recognized: " + str(type(self.__value)))

    def __get_2d_instance(self, kper, arg):
        """
        parse an argument into a Util2d instance
        """
        ext_filename = None
        name = self.name_base + str(kper + 1)
        # note: the external file suffix uses the zero-based kper while the
        # name uses kper + 1
        ext_filename = self.ext_filename_base + str(kper) + '.ref'
        u2d = Util2d(self.model, self.shape, self.dtype, arg,
                     fmtin=self.fmtin, name=name,
                     ext_filename=ext_filename,
                     locat=self.locat,
                     array_free_format=self.array_free_format)
        return u2d
class Util2d(object):
"""
Util2d class for handling 2-D model arrays
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
    shape : length 2 tuple
        shape of the 2-D array
dtype : [np.int,np.float32,np.bool]
the type of the data
value : variable
the data to be assigned to the 2-D array.
can be a scalar, list, ndarray, or filename
    name : string
        name of the property (optional). (the default is None)
fmtin : string
modflow fmtin variable (optional). (the default is None)
cnstnt : string
modflow cnstnt variable (optional) (the default is 1.0)
iprn : int
modflow iprn variable (optional) (the default is -1)
locat : int
modflow locat variable (optional) (the default is None). If the model
instance does not support free format and the
external flag is not set and the value is a simple scalar,
then locat must be explicitly passed as it is the unit number
to read the array from)
ext_filename : string
the external filename to write the array representation to
(optional) (the default is None) .
If type(value) is a string and is an accessible filename,
the ext_filename is reset to value.
bin : bool
flag to control writing external arrays as binary (optional)
(the default is False)
Attributes
----------
array : np.ndarray
the array representation of the 2-D object
how : str
the str flag to control how the array is written to the model
input files e.g. "constant","internal","external","openclose"
format : ArrayFormat object
controls the ASCII representation of the numeric array
Methods
-------
get_file_entry : string
get the model input file string including the control record
See Also
--------
Notes
-----
If value is a valid filename and model.external_path is None, then a copy
of the file is made and placed in model.model_ws directory.
If value is a valid filename and model.external_path is not None, then
a copy of the file is made a placed in the external_path directory.
If value is a scalar, it is always written as a constant, regardless of
the model.external_path setting.
If value is an array and model.external_path is not None, then the array
is written out in the external_path directory. The name of the file that
holds the array is created from the name attribute. If the model supports
"free format", then the array is accessed via the "open/close" approach.
Otherwise, a unit number and filename is added to the name file.
If value is an array and model.external_path is None, then the array is
written internally to the model input file.
Examples
--------
"""
def __init__(self, model, shape, dtype, value, name, fmtin=None,
cnstnt=1.0, iprn=-1, ext_filename=None, locat=None, bin=False,
how=None, array_free_format=None):
"""
1d or 2-d array support with minimum of mem footprint.
only creates arrays as needed,
otherwise functions with strings or constants
shape = 1-d or 2-d tuple
value = an instance of string,list,np.int,np.float32,np.bool or np.ndarray
vtype = str,np.int,np.float32,np.bool, or np.ndarray
dtype = np.int, or np.float32
if ext_filename is passed, scalars are written externally as arrays
model instance bool attribute "array_free_format" used for generating control record
model instance string attribute "external_path"
used to determine external array writing
bin controls writing of binary external arrays
"""
if isinstance(value, Util2d):
for attr in value.__dict__.items():
setattr(self, attr[0], attr[1])
self.model = model
self.name = name
self._ext_filename = self.name.replace(' ', '_') + ".ref"
if ext_filename is not None:
self.ext_filename = ext_filename.lower()
else:
self.ext_filename = None
if locat is not None:
self.locat = locat
return
# some defense
if dtype not in [np.int, np.int32, np.float32, np.bool]:
raise Exception('Util2d:unsupported dtype: ' + str(dtype))
if name is not None:
name = name.lower()
if ext_filename is not None:
ext_filename = ext_filename.lower()
self.model = model
for s in shape:
assert isinstance(s,
numbers.Integral), "all shape elements must be integers, " + \
"not {0}:{1}".format(type(s),
str(s))
self.shape = shape
self.dtype = dtype
self.name = name
self.locat = locat
self.parse_value(value)
if self.vtype == str:
fmtin = "(FREE)"
self.__value_built = None
#if isinstance(dtype, np.float) or isinstance(dtype, np.float32):
#if dtype in [float,np.float,np.float32]:
# self.cnstnt = float(cnstnt)
#else:
# self.cnstnt = int(cnstnt)
self.cnstnt = dtype(cnstnt)
self.iprn = iprn
self._format = ArrayFormat(self, fortran=fmtin,
array_free_format=array_free_format)
self._format.binary = bool(bin)
self.ext_filename = ext_filename
self._ext_filename = self.name.replace(' ', '_') + ".ref"
self._acceptable_hows = ["constant", "internal", "external",
"openclose"]
if how is not None:
how = how.lower()
assert how in self._acceptable_hows
self._how = how
else:
self._decide_how()
def _decide_how(self):
# if a constant was passed in
if self.vtype in [np.int, np.float32]:
self._how = "constant"
# if a filename was passed in or external path was set
elif self.model.external_path is not None or \
self.vtype == str:
if self.format.array_free_format:
self._how = "openclose"
else:
self._how = "external"
else:
self._how = "internal"
def plot(self, title=None, filename_base=None, file_extension=None,
fignum=None, **kwargs):
"""
Plot 2-D model input data
Parameters
----------
title : str
Plot title. If a plot title is not provide one will be
created based on data name (self.name). (default is None)
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
**kwargs : dict
axes : list of matplotlib.pyplot.axis
List of matplotlib.pyplot.axis that will be used to plot
data for each layer. If axes=None axes will be generated.
(default is None)
pcolor : bool
Boolean used to determine if matplotlib.pyplot.pcolormesh
plot will be plotted. (default is True)
colorbar : bool
Boolean used to determine if a color bar will be added to
the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
(default is False)
inactive : bool
Boolean used to determine if a black overlay in inactive
cells in a layer will be displayed. (default is True)
contour : bool
Boolean used to determine if matplotlib.pyplot.contour
plot will be plotted. (default is False)
clabel : bool
Boolean used to determine if matplotlib.pyplot.clabel
will be plotted. Only used if contour=True. (default is False)
grid : bool
Boolean used to determine if the model grid will be plotted
on the figure. (default is False)
masked_values : list
List of unique values to be excluded from the plot.
Returns
----------
out : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis is returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.dis.top.plot()
"""
import flopy.plot.plotutil as pu
if title is None:
title = self.name
if file_extension is not None:
fext = file_extension
else:
fext = 'png'
filename = None
if filename_base is not None:
filename = '{}_{}.{}'.format(filename_base, self.name, fext)
return pu._plot_array_helper(self.array, self.model,
names=title, filenames=filename,
fignum=fignum, **kwargs)
def export(self, f, **kwargs):
from flopy import export
return export.utils.util2d_helper(f, self, **kwargs)
def to_shapefile(self, filename):
"""
Export 2-D model data to a shapefile (as polygons) of self.array
Parameters
----------
filename : str
Shapefile name to write
Returns
----------
None
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.dis.top.as_shapefile('test_top.shp')
"""
import warnings
warnings.warn(
"Deprecation warning: to_shapefile() is deprecated. use .export()")
# from flopy.utils.flopy_io import write_grid_shapefile, shape_attr_name
# name = shape_attr_name(self.name, keep_layer=True)
# write_grid_shapefile(filename, self.model.dis.sr, {name: self.array})
self.export(filename)
def set_fmtin(self, fmtin):
self._format = ArrayFormat(self, fortran=fmtin,
array_free_format=self.format.array_free_format)
def get_value(self):
return copy.deepcopy(self.__value)
# overloads, tries to avoid creating arrays if possible
def __add__(self, other):
if self.vtype in [np.int, np.float32] and self.vtype == other.vtype:
return self.__value + other.get_value()
else:
return self.array + other.array
def __sub__(self, other):
if self.vtype in [np.int, np.float32] and self.vtype == other.vtype:
return self.__value - other.get_value()
else:
return self.array - other.array
def __mul__(self, other):
if np.isscalar(other):
return Util2d(self.model, self.shape, self.dtype,
self._array * other, self.name,
self.format.fortran, self.cnstnt, self.iprn,
self.ext_filename,
self.locat, self.format.binary)
else:
raise NotImplementedError(
"Util2d.__mul__() not implemented for non-scalars")
def __eq__(self, other):
if not isinstance(other, Util2d):
return False
if not np.array_equal(other.array, self.array):
return False
if other.cnstnt != self.cnstnt:
return False
return True
def __getitem__(self, k):
if isinstance(k, int):
if len(self.shape) == 1:
return self.array[k]
elif self.shape[0] == 1:
return self.array[0, k]
elif self.shape[1] == 1:
return self.array[k, 0]
else:
raise Exception(
"Util2d.__getitem__() error: an integer was passed, " +
"self.shape > 1 in both dimensions")
else:
if isinstance(k, tuple):
if len(k) == 2:
return self.array[k[0], k[1]]
if len(k) == 1:
return self.array[k]
else:
return self.array[(k,)]
def __setitem__(self, k, value):
"""
this one is dangerous because it resets __value
"""
a = self.array
a[k] = value
a = a.astype(self.dtype)
self.__value = a
if self.__value_built is not None:
self.__value_built = None
    def __setattr__(self, key, value):
        # intercept a few "virtual" attributes; every other assignment --
        # including the self._format / self._how assignments made below,
        # which re-enter this method -- falls through to object.__setattr__,
        # terminating the recursion
        if key == "fmtin":
            # assigning fmtin rebuilds the ArrayFormat from a fortran spec
            self._format = ArrayFormat(self, fortran=value)
        elif key == "format":
            assert isinstance(value, ArrayFormat)
            self._format = value
        elif key == "how":
            # validate against the accepted write styles before storing
            value = value.lower()
            assert value in self._acceptable_hows
            self._how = value
        else:
            super(Util2d, self).__setattr__(key, value)
def all(self):
return self.array.all()
    def __len__(self):
        """Length along the first dimension (row count for 2-D shapes)."""
        return self.shape[0]
def sum(self):
return self.array.sum()
    def unique(self):
        """Sorted unique values of the multiplied array."""
        return np.unique(self.array)
    @property
    def format(self):
        """The ArrayFormat instance controlling how the array is written."""
        # don't return a copy because we want to allow
        # access to the attributes of ArrayFormat
        return self._format
    @property
    def how(self):
        """How the array will be written ("constant", "internal",
        "external", or "openclose"); returned as a copy."""
        return copy.copy(self._how)
    @property
    def vtype(self):
        """Python type of the raw stored value (int, float type, str path,
        or np.ndarray)."""
        return type(self.__value)
    @property
    def python_file_path(self):
        """
        where python is going to write the file

        Returns
        -------
        file_path (str) : path relative to python: includes model_ws
            (and external_path when the model defines one)
        """
        # if self.vtype != str:
        #    raise Exception("Util2d call to python_file_path " +
        #                    "for vtype != str")
        python_file_path = ''
        if self.model.model_ws != '.':
            python_file_path = os.path.join(self.model.model_ws)
        if self.model.external_path is not None:
            python_file_path = os.path.join(python_file_path,
                                            self.model.external_path)
        python_file_path = os.path.join(python_file_path,
                                        self.filename)
        return python_file_path
    @property
    def filename(self):
        """Base name of the backing file: from ext_filename (or its private
        fallback) when the value lives in memory, or from the stored path
        when the value IS a file path."""
        if self.vtype != str:
            if self.ext_filename is not None:
                filename = os.path.split(self.ext_filename)[-1]
            else:
                filename = os.path.split(self._ext_filename)[-1]
        else:
            filename = os.path.split(self.__value)[-1]
        return filename
    @property
    def model_file_path(self):
        """
        where the model expects the file to be

        Returns
        -------
        file_path (str): path relative to the name file
            (external_path/filename, or just filename)
        """
        model_file_path = ''
        if self.model.external_path is not None:
            model_file_path = os.path.join(model_file_path,
                                           self.model.external_path)
        model_file_path = os.path.join(model_file_path, self.filename)
        return model_file_path
def get_constant_cr(self, value):
if self.format.array_free_format:
lay_space = '{0:>27s}'.format('')
if self.vtype in [int, np.int]:
lay_space = '{0:>32s}'.format('')
cr = 'CONSTANT ' + self.format.py[1].format(value)
cr = '{0:s}{1:s}#{2:<30s}\n'.format(cr, lay_space,
self.name)
else:
cr = self._get_fixed_cr(0, value=value)
return cr
def _get_fixed_cr(self, locat, value=None):
fformat = self.format.fortran
if value is None:
value = self.cnstnt
if self.format.binary:
if locat is None:
raise Exception("Util2d._get_fixed_cr(): locat is None but"+\
"format is binary")
if not self.format.array_free_format:
locat = -1 * np.abs(locat)
if locat is None:
locat = 0
if locat is 0:
fformat = ''
if self.dtype == np.int:
cr = '{0:>10.0f}{1:>10.0f}{2:>19s}{3:>10.0f} #{4}\n' \
.format(locat, value, fformat,
self.iprn, self.name)
elif self.dtype == np.float32:
cr = '{0:>10.0f}{1:>10.5G}{2:>19s}{3:>10.0f} #{4}\n' \
.format(locat, value, fformat,
self.iprn, self.name)
else:
raise Exception('Util2d: error generating fixed-format ' +
' control record, dtype must be np.int or np.float32')
return cr
    def get_internal_cr(self):
        """Build an INTERNAL control record (free-format), or delegate to
        the fixed-format record using self.locat."""
        if self.format.array_free_format:
            cr = 'INTERNAL {0:15} {1:>10s} {2:2.0f} #{3:<30s}\n' \
                .format(self.cnstnt_str, self.format.fortran, self.iprn, self.name)
            return cr
        else:
            return self._get_fixed_cr(self.locat)
@property
def cnstnt_str(self):
if isinstance(self.cnstnt,str):
return self.cnstnt
else:
return "{0:15.6G}".format(self.cnstnt)
def get_openclose_cr(self):
cr = 'OPEN/CLOSE {0:>30s} {1:15} {2:>10s} {3:2.0f} {4:<30s}\n'.format(
self.model_file_path, self.cnstnt_str,
self.format.fortran, self.iprn,
self.name)
return cr
    def get_external_cr(self):
        """Build an EXTERNAL control record, registering the array file and
        a fresh unit number with the model as a side effect."""
        locat = self.model.next_ext_unit()
        # if self.format.binary:
        #    locat = -1 * np.abs(locat)
        self.model.add_external(self.model_file_path, locat,
                                self.format.binary)
        if self.format.array_free_format:
            cr = 'EXTERNAL  {0:>30d} {1:15} {2:>10s} {3:2.0f} {4:<30s}\n'.format(
                locat, self.cnstnt_str,
                self.format.fortran, self.iprn,
                self.name)
            return cr
        else:
            return self._get_fixed_cr(locat)
    def get_file_entry(self, how=None):
        """Return the complete file entry (control record, plus array text
        for internal arrays), writing/copying external array files as a
        side effect.

        Parameters
        ----------
        how : str, optional
            One of "constant", "internal", "external", "openclose";
            defaults to self._how.  May be silently overridden below when
            incompatible with the model's format capabilities.
        """
        if how is not None:
            how = how.lower()
        else:
            how = self._how
        # free format is only legal when the model supports it
        if not self.format.array_free_format and self.format.free:
            print("Util2d {0}: can't be free format...resetting".format(
                self.name))
            self.format.free = False
        # internal arrays need a unit number in fixed-format models
        if not self.format.array_free_format and self.how == "internal" and self.locat is None:
            print("Util2d {0}: locat is None, but ".format(self.name) + \
                  "model does not " + \
                  "support free format and how is internal..." + \
                  "resetting how = external")
            how = "external"
        # binary data / an external_path can't be written inline
        if (self.format.binary or self.model.external_path) \
                and how in ["constant", "internal"]:
            print("Util2d:{0}: ".format(self.name) + \
                  "resetting 'how' to external")
            if self.format.array_free_format:
                how = "openclose"
            else:
                how = "external"
        if how == "internal":
            assert not self.format.binary, "Util2d error: 'how' is internal, but" + \
                                           "format is binary"
            cr = self.get_internal_cr()
            return cr + self.string
        elif how == "external" or how == "openclose":
            if how == "openclose":
                assert self.format.array_free_format, "Util2d error: 'how' is openclose," + \
                                                      "but model doesn't support free fmt"
            # write a file if needed
            if self.vtype != str:
                if self.format.binary:
                    self.write_bin(self.shape, self.python_file_path,
                                   self._array,
                                   bintype="head")
                else:
                    self.write_txt(self.shape, self.python_file_path,
                                   self._array,
                                   fortran_format=self.format.fortran)
            elif self.__value != self.python_file_path:
                # value is a path to an existing file elsewhere: copy it in
                if os.path.exists(self.python_file_path):
                    # if the file already exists, remove it
                    if self.model.verbose:
                        print("Util2d warning: removing existing array " +
                              "file {0}".format(self.model_file_path))
                    try:
                        os.remove(self.python_file_path)
                    except Exception as e:
                        raise Exception(
                            "Util2d: error removing existing file " + \
                            self.python_file_path)
                # copy the file to the new model location
                try:
                    shutil.copy2(self.__value, self.python_file_path)
                except Exception as e:
                    raise Exception("Util2d.get_file_array(): error copying " +
                                    "{0} to {1}:{2}".format(self.__value,
                                                            self.python_file_path,
                                                            str(e)))
            if how == "external":
                return self.get_external_cr()
            else:
                return self.get_openclose_cr()
        elif how == "constant":
            if self.vtype not in [int, np.float32]:
                # array-backed value must be uniform to write as CONSTANT
                u = np.unique(self._array)
                assert u.shape[
                           0] == 1, "Util2d error: 'how' is constant, but array " + \
                                    "is not uniform"
                value = u[0]
            else:
                value = self.__value
            return self.get_constant_cr(value)
        else:
            raise Exception("Util2d.get_file_entry() error: " + \
                            "unrecognized 'how':{0}".format(how))
    @property
    def string(self):
        """
        get the string representation of value attribute

        Note:
            the string representation DOES NOT include the effects of the control
            record multiplier - this method is used primarily for writing model input files
        """
        # convert array to string with specified format
        a_string = self.array2string(self.shape, self._array,
                                     python_format=self.format.py)
        return a_string
    @property
    def array(self):
        """
        Get the COPY of array representation of value attribute with the
        effects of the control record multiplier applied.

        Returns
        -------
        array : numpy.ndarray
            Copy of the array with the multiplier applied.

        Note
        ----
        .array is a COPY of the array representation as seen by the
        model - with the effects of the control record multiplier applied.
        A float multiplier of 0.0 is treated as 1.0 (MODFLOW convention);
        a string multiplier (parameter name) cannot be applied and is
        ignored with a warning.
        """
        if isinstance(self.cnstnt, str):
            print("WARNING: cnstnt is str for {0}".format(self.name))
            return self._array.astype(self.dtype)
        if isinstance(self.cnstnt, int):
            cnstnt = self.cnstnt
        else:
            # float multiplier: 0.0 means "no multiplier"
            if self.cnstnt == 0.0:
                cnstnt = 1.0
            else:
                cnstnt = self.cnstnt
        # return a copy of self._array since it is being
        # multiplied
        return (self._array * cnstnt).astype(self.dtype)
    @property
    def _array(self):
        """
        get the array representation of value attribute

        if value is a string or a constant, the array is loaded/built only once
        and cached in __value_built.

        Note:
            the return array representation DOES NOT include the effect of the multiplier
            in the control record. To get the array as the model sees it (with the multiplier applied),
            use the Util2d.array method.
        """
        if self.vtype == str:
            # value is a file path: load (text or binary) on first access
            if self.__value_built is None:
                file_in = open(self.__value, 'r')
                if self.format.binary:
                    header, self.__value_built = Util2d.load_bin(self.shape,
                                                                 file_in,
                                                                 self.dtype,
                                                                 bintype="head")
                else:
                    self.__value_built = Util2d.load_txt(self.shape, file_in,
                                                         self.dtype,
                                                         self.format.fortran).astype(
                        self.dtype)
                file_in.close()
            return self.__value_built
        elif self.vtype != np.ndarray:
            # scalar value: broadcast to the full shape on first access
            if self.__value_built is None:
                self.__value_built = np.ones(self.shape, dtype=self.dtype) \
                                     * self.__value
            return self.__value_built
        else:
            return self.__value
@staticmethod
def load_block(shape, file_in, dtype):
"""
load a (possibly wrapped format) array from a mt3d block
(self.__value) and casts to the proper type (self.dtype)
made static to support the load functionality
this routine now supports fixed format arrays where the numbers
may touch.
"""
nrow, ncol = shape
data = np.zeros(shape, dtype=dtype) + np.NaN
if not hasattr(file_in, 'read'):
file_in = open(file_in, 'r')
line = file_in.readline()
raw = line.strip('\n').split()
nblock = int(raw[0])
for n in range(nblock):
line = file_in.readline()
raw = line.strip('\n').split()
i1, i2, j1, j2, v = int(raw[0])-1, int(raw[1])-1, \
int(raw[2])-1, int(raw[3])-1, \
dtype(raw[4])
for j in range(j1, j2+1):
for i in range(i1, i2+1):
data[i, j] = v
if np.isnan(np.sum(data)):
raise Exception("Util2d.load_block() error: np.NaN in data array")
return data
    @staticmethod
    def load_txt(shape, file_in, dtype, fmtin):
        """
        load a (possibly wrapped format) array from a file
        (self.__value) and casts to the proper type (self.dtype)
        made static to support the load functionality
        this routine now supports fixed format arrays where the numbers
        may touch.

        Parameters: shape is (nrow, ncol); file_in is an open file or a
        path; fmtin is a fortran format descriptor (e.g. "(10G12.4)" or
        "(FREE)").  Raises when a value cannot be cast or cells are left
        unset (NaN sentinel).
        """
        # file_in = open(self.__value,'r')
        # file_in = open(filename,'r')
        # nrow,ncol = self.shape
        nrow, ncol = shape
        npl, fmt, width, decimal = ArrayFormat.decode_fortran_descriptor(fmtin)
        # flat buffer seeded with NaN so missing cells are detectable
        data = np.zeros((nrow * ncol), dtype=dtype) + np.NaN
        d = 0
        if not hasattr(file_in, 'read'):
            file_in = open(file_in, 'r')
        while True:
            line = file_in.readline()
            if line in [None, ''] or d == nrow * ncol:
                break
            if npl == 'free':
                raw = line.strip('\n').split()
                if len(raw) == 1 and ',' in line:
                    # single comma-separated token: split on commas
                    raw = raw[0].split(',')
                elif ',' in line:
                    raw = line.replace(',', '').strip('\n').split()
                elif '*' in line:
                    # fortran repeat syntax "N*value": expand in place
                    rawins = []
                    rawremove = []
                    for idx, t in enumerate(raw):
                        if '*' in t:
                            # print(t)
                            rawremove.append(t)
                            tt = t.split('*')
                            tlist = []
                            for jdx in range(int(tt[0])):
                                tlist.append(tt[1])
                            rawins.append((idx, list(tlist)))
                    iadd = 1
                    for t in rawins:
                        ipos = t[0] + iadd
                        for tt in t[1]:
                            raw.insert(ipos, tt)
                            ipos += 1
                        iadd += 1
                    raw = [e for e in raw if e not in rawremove]
            else:
                # split line using number of values in the line
                rawlist = []
                istart = 0
                istop = width
                for i in range(npl):
                    txtval = line[istart:istop]
                    if txtval.strip() != '':
                        rawlist.append(txtval)
                    else:
                        break
                    istart = istop
                    istop += width
                raw = rawlist
            for a in raw:
                try:
                    data[d] = dtype(a)
                except:
                    raise Exception('Util2d:unable to cast value: ' +
                                    str(a) + ' to type:' + str(dtype))
                if d == (nrow * ncol) - 1:
                    # array filled: reshape and return early
                    assert len(data) == (nrow * ncol)
                    data.resize(nrow, ncol)
                    return data
                d += 1
        # file_in.close()
        if np.isnan(np.sum(data)):
            raise Exception("Util2d.load_txt() error: np.NaN in data array")
        data.resize(nrow, ncol)
        return data
    @staticmethod
    def write_txt(shape, file_out, data, fortran_format="(FREE)",
                  python_format=None):
        """Write a 2-D array as text: fast np.savetxt path for free format,
        otherwise formatted via array2string.  file_out may be a path or an
        open file object."""
        if fortran_format.upper() == '(FREE)' and python_format is None:
            np.savetxt(file_out, data,
                       ArrayFormat.get_default_numpy_fmt(data.dtype),
                       delimiter='')
            return
        if not hasattr(file_out, "write"):
            file_out = open(file_out, 'w')
        file_out.write(
            Util2d.array2string(shape, data, fortran_format=fortran_format,
                                python_format=python_format))
    @staticmethod
    def array2string(shape, data, fortran_format="(FREE)",
                     python_format=None):
        """
        return a string representation of a 2-D array, wrapping lines
        every `column_length` values.

        Either a fortran format descriptor ("(10G12.4)") or an explicit
        python_format pair [column_length, fmt] controls the field layout.
        made static to support the load functionality.
        """
        if len(shape) == 2:
            nrow, ncol = shape
        else:
            # 1-D shape: treat as a single row
            nrow = 1
            ncol = shape[0]
            data = np.atleast_2d(data)
        if python_format is None:
            column_length, fmt, width, decimal = \
                ArrayFormat.decode_fortran_descriptor(fortran_format)
            if decimal is None:
                output_fmt = '{0}0:{1}{2}{3}'.format('{', width, 'd', '}')
            else:
                output_fmt = '{0}0:{1}.{2}{3}{4}'.format('{', width, decimal,
                                                         fmt, '}')
        else:
            try:
                column_length, output_fmt = int(python_format[0]), \
                                            python_format[1]
            except:
                raise Exception('Util2d.write_txt: \nunable to parse'
                                + 'python_format:\n    {0}\n'.
                                format(python_format)
                                + '  python_format should be a list with\n'
                                + '   [column_length, fmt]\n'
                                + '    e.g., [10, {0:10.2e}]')
        # a row that doesn't divide evenly needs a closing line return
        if ncol % column_length == 0:
            linereturnflag = False
        else:
            linereturnflag = True
        # write the array to a string
        s = ""
        for i in range(nrow):
            icol = 0
            for j in range(ncol):
                try:
                    s = s + output_fmt.format(data[i, j])
                except Exception as e:
                    raise Exception("error writing array value" + \
                                    "{0} at r,c [{1},{2}]\n{3}".format(
                                        data[i, j], i, j, str(e)))
                if (j + 1) % column_length == 0.0 and (j != 0 or ncol == 1):
                    s += '\n'
            if linereturnflag:
                s += '\n'
        return s
@staticmethod
def load_bin(shape, file_in, dtype, bintype=None):
import flopy.utils.binaryfile as bf
nrow, ncol = shape
if bintype is not None and not np.issubdtype(dtype, np.int):
header_dtype = bf.BinaryHeader.set_dtype(bintype=bintype)
header_data = np.fromfile(file_in, dtype=header_dtype, count=1)
else:
header_data = None
data = np.fromfile(file_in, dtype=dtype, count=nrow * ncol)
data.resize(nrow, ncol)
return [header_data, data]
    @staticmethod
    def write_bin(shape, file_out, data, bintype=None, header_data=None):
        """Write a 2-D array in binary, writing a header first for
        non-integer data (built from bintype when not supplied).
        file_out may be a path or an open binary file."""
        if not hasattr(file_out, 'write'):
            file_out = open(file_out, 'wb')
        dtype = data.dtype
        if dtype.kind != 'i':
            if bintype is not None:
                if header_data is None:
                    # NOTE(review): BinaryHeader is referenced without a
                    # module prefix here (load_bin uses bf.BinaryHeader) —
                    # presumably imported at module level; verify.
                    header_data = BinaryHeader.create(bintype=bintype,
                                                      nrow=shape[0],
                                                      ncol=shape[1])
            if header_data is not None:
                header_data.tofile(file_out)
        data.tofile(file_out)
        return
def parse_value(self, value):
"""
parses and casts the raw value into an acceptable format for __value
lot of defense here, so we can make assumptions later
"""
if isinstance(value, list):
value = np.array(value)
if isinstance(value, bool):
if self.dtype == np.bool:
try:
self.__value = np.bool(value)
except:
raise Exception('Util2d:could not cast ' +
'boolean value to type "np.bool": ' +
str(value))
else:
raise Exception('Util2d:value type is bool, ' +
' but dtype not set as np.bool')
elif isinstance(value, str):
if os.path.exists(value):
self.__value = value
return
elif self.dtype == np.int:
try:
self.__value = int(value)
except:
raise Exception("Util2d error: str not a file and " +
"couldn't be cast to int: {0}".format(
value))
else:
try:
self.__value = float(value)
except:
raise Exception("Util2d error: str not a file and " +
"couldn't be cast to float: {0}".format(
value))
elif np.isscalar(value):
if self.dtype == np.int:
try:
self.__value = np.int(value)
except:
raise Exception('Util2d:could not cast scalar ' +
'value to type "int": ' + str(value))
elif self.dtype == np.float32:
try:
self.__value = np.float32(value)
except:
raise Exception('Util2d:could not cast ' +
'scalar value to type "float": ' +
str(value))
elif isinstance(value, np.ndarray):
# if value is 3d, but dimension 1 is only length 1,
# then drop the first dimension
if len(value.shape) == 3 and value.shape[0] == 1:
value = value[0]
if self.shape != value.shape:
raise Exception('Util2d:self.shape: ' + str(self.shape) +
' does not match value.shape: ' +
str(value.shape))
if self.dtype != value.dtype:
value = value.astype(self.dtype)
self.__value = value
else:
raise Exception('Util2d:unsupported type in util_array: ' +
str(type(value)))
    @staticmethod
    def load(f_handle, model, shape, dtype, name, ext_unit_dict=None,
             array_free_format=None, array_format="modflow"):
        """
        functionality to load Util2d instance from an existing
        model input file
        external and internal record types must be fully loaded
        if you are using fixed format record types,make sure
        ext_unit_dict has been initialized from the NAM file

        Dispatches on the parsed control record type: constant,
        open/close, internal, external, or (MT3D) block.
        """
        if shape == (0, 0):
            raise IndexError('No information on model grid dimensions. '
                             'Need nrow, ncol to load a Util2d array.')
        curr_unit = None
        if ext_unit_dict is not None:
            # determine the current file's unit number
            cfile = f_handle.name
            for cunit in ext_unit_dict:
                if cfile == ext_unit_dict[cunit].filename:
                    curr_unit = cunit
                    break
        # Allows for special MT3D array reader
        # array_format = None
        # if hasattr(model, 'array_format'):
        #    array_format = model.array_format
        cr_dict = Util2d.parse_control_record(f_handle.readline(),
                                              current_unit=curr_unit,
                                              dtype=dtype,
                                              ext_unit_dict=ext_unit_dict,
                                              array_format=array_format)
        if cr_dict['type'] == 'constant':
            u2d = Util2d(model, shape, dtype, cr_dict['cnstnt'], name=name,
                         iprn=cr_dict['iprn'], fmtin="(FREE)",
                         array_free_format=array_free_format)
        elif cr_dict['type'] == 'open/close':
            # clean up the filename a little
            fname = cr_dict['fname']
            fname = fname.replace("'", "")
            fname = fname.replace('"', '')
            fname = fname.replace('\'', '')
            fname = fname.replace('\"', '')
            fname = fname.replace('\\', os.path.sep)
            fname = os.path.join(model.model_ws, fname)
            # load_txt(shape, file_in, dtype, fmtin):
            assert os.path.exists(fname), "Util2d.load() error: open/close " + \
                                          "file " + str(fname) + " not found"
            if str('binary') not in str(cr_dict['fmtin'].lower()):
                f = open(fname, 'r')
                data = Util2d.load_txt(shape=shape,
                                       file_in=f,
                                       dtype=dtype, fmtin=cr_dict['fmtin'])
            else:
                f = open(fname, 'rb')
                header_data, data = Util2d.load_bin(shape, f, dtype,
                                                    bintype='Head')
            f.close()
            u2d = Util2d(model, shape, dtype, data, name=name,
                         iprn=cr_dict['iprn'], fmtin="(FREE)",
                         cnstnt=cr_dict['cnstnt'],
                         array_free_format=array_free_format)
        elif cr_dict['type'] == 'internal':
            # array text follows the control record in the same file
            data = Util2d.load_txt(shape, f_handle, dtype, cr_dict['fmtin'])
            u2d = Util2d(model, shape, dtype, data, name=name,
                         iprn=cr_dict['iprn'], fmtin="(FREE)",
                         cnstnt=cr_dict['cnstnt'], locat=None,
                         array_free_format=array_free_format)
        elif cr_dict['type'] == 'external':
            if str('binary') not in str(cr_dict['fmtin'].lower()):
                assert cr_dict['nunit'] in list(ext_unit_dict.keys())
                data = Util2d.load_txt(shape,
                                       ext_unit_dict[
                                           cr_dict['nunit']].filehandle,
                                       dtype, cr_dict['fmtin'])
            else:
                # binary unit numbers may be recorded with opposite sign
                if cr_dict['nunit'] not in list(ext_unit_dict.keys()):
                    cr_dict["nunit"] *= -1
                assert cr_dict['nunit'] in list(ext_unit_dict.keys())
                header_data, data = Util2d.load_bin(
                    shape, ext_unit_dict[cr_dict['nunit']].filehandle, dtype,
                    bintype='Head')
            u2d = Util2d(model, shape, dtype, data, name=name,
                         iprn=cr_dict['iprn'], fmtin="(FREE)",
                         cnstnt=cr_dict['cnstnt'],
                         array_free_format=array_free_format)
            # track this unit number so we can remove it from the external
            # file list later
            model.pop_key_list.append(cr_dict['nunit'])
        elif cr_dict['type'] == 'block':
            data = Util2d.load_block(shape, f_handle, dtype)
            u2d = Util2d(model, shape, dtype, data, name=name,
                         iprn=cr_dict['iprn'], fmtin="(FREE)",
                         cnstnt=cr_dict['cnstnt'], locat=None,
                         array_free_format=array_free_format)
        return u2d
@staticmethod
def parse_control_record(line, current_unit=None, dtype=np.float32,
ext_unit_dict=None, array_format=None):
"""
parses a control record when reading an existing file
rectifies fixed to free format
current_unit (optional) indicates the unit number of the file being parsed
"""
free_fmt = ['open/close', 'internal', 'external', 'constant']
raw = line.strip().split()
freefmt, cnstnt, fmtin, iprn, nunit = None, None, None, -1, None
fname = None
isfloat = False
if dtype == np.float or dtype == np.float32:
isfloat = True
# if free format keywords
if str(raw[0].lower()) in str(free_fmt):
freefmt = raw[0].lower()
if raw[0].lower() == 'constant':
if isfloat:
cnstnt = np.float(raw[1].lower().replace('d', 'e'))
else:
cnstnt = np.int(raw[1].lower())
if raw[0].lower() == 'internal':
if isfloat:
cnstnt = np.float(raw[1].lower().replace('d', 'e'))
else:
cnstnt = np.int(raw[1].lower())
fmtin = raw[2].strip()
iprn = int(raw[3])
elif raw[0].lower() == 'external':
if ext_unit_dict is not None:
try:
# td = ext_unit_dict[int(raw[1])]
fname = ext_unit_dict[int(raw[1])].filename.strip()
except:
pass
nunit = int(raw[1])
if isfloat:
cnstnt = np.float(raw[2].lower().replace('d', 'e'))
else:
cnstnt = np.int(raw[2].lower())
fmtin = raw[3].strip()
iprn = int(raw[4])
elif raw[0].lower() == 'open/close':
fname = raw[1].strip()
if isfloat:
cnstnt = np.float(raw[2].lower().replace('d', 'e'))
else:
cnstnt = np.int(raw[2].lower())
fmtin = raw[3].strip()
iprn = int(raw[4])
npl, fmt, width, decimal = None, None, None, None
else:
locat = np.int(line[0:10].strip())
if isfloat:
if len(line) >= 20:
cnstnt = np.float(
line[10:20].strip().lower().replace('d', 'e'))
else:
cnstnt = 0.0
else:
if len(line) >= 20:
cnstnt = np.int(line[10:20].strip())
else:
cnstnt = 0
#if cnstnt == 0:
# cnstnt = 1
if locat != 0:
if len(line) >= 40:
fmtin = line[20:40].strip()
else:
fmtin = ''
try:
iprn = np.int(line[40:50].strip())
except:
iprn = 0
# locat = int(raw[0])
# cnstnt = float(raw[1])
# fmtin = raw[2].strip()
# iprn = int(raw[3])
if locat == 0:
freefmt = 'constant'
elif locat < 0:
freefmt = 'external'
nunit = np.int(locat) * -1
fmtin = '(binary)'
elif locat > 0:
# if the unit number matches the current file, it's internal
if locat == current_unit:
freefmt = 'internal'
else:
freefmt = 'external'
nunit = np.int(locat)
# Reset for special MT3D control flags
if array_format == 'mt3d':
if locat == 100:
freefmt = 'internal'
nunit = current_unit
elif locat == 101:
freefmt = 'block'
nunit = current_unit
elif locat == 102:
raise NotImplementedError(
'MT3D zonal format not supported...')
elif locat == 103:
freefmt = 'internal'
nunit = current_unit
fmtin = '(free)'
cr_dict = {}
cr_dict['type'] = freefmt
cr_dict['cnstnt'] = cnstnt
cr_dict['nunit'] = nunit
cr_dict['iprn'] = iprn
cr_dict['fmtin'] = fmtin
cr_dict['fname'] = fname
return cr_dict
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from artman.tasks import io_tasks
_UPLOAD_LIMIT = 123
class ValidateUploadSizeTest(unittest.TestCase):
    """Unit tests for io_tasks._validate_upload_size."""

    def test_validate_upload_size_ok(self):
        # A size exactly at the limit is accepted (no exception raised).
        io_tasks._validate_upload_size(_UPLOAD_LIMIT, _UPLOAD_LIMIT)

    def test_validate_upload_size_bad(self):
        # One byte over the limit must raise ValueError.
        with self.assertRaises(ValueError):
            io_tasks._validate_upload_size(_UPLOAD_LIMIT + 1, _UPLOAD_LIMIT)
|
import socket
from urlparse import urlparse
import re
import sys
class HttpResponse:
    """Parses a raw HTTP response header string into status line fields and
    a header dictionary."""

    def __init__(self, headerString):
        self.init()
        self.parse(headerString)

    def init(self):
        # Contain the header parameters
        self.mHeaders = dict()
        self.mResponseCode = -1
        self.mResponseMessage = ""
        self.mHttpVersion = 0

    def __contains__(self, item):
        # FIX: dict.has_key() was removed in Python 3; the "in" operator
        # is the equivalent form and also works in Python 2.
        return item in self.mHeaders

    def getHeader(self, headerName):
        """Return the header value, or False when the header is absent."""
        if headerName in self.mHeaders:
            return self.mHeaders[headerName]
        else:
            return False

    def getResponse(self):
        """Return (status_code, reason_phrase)."""
        return (self.mResponseCode, self.mResponseMessage)

    def getHttpVersion(self):
        """Return the HTTP version as an int: 10, 11, or 20 (0 if unknown)."""
        return self.mHttpVersion

    def parse(self, response):
        """Parse the status line and "Name: value" header lines."""
        # Split on \r\n
        splits = re.split("\r\n", response)
        httpFound = False
        for split in splits:
            # If we have not found the first line
            if not httpFound:
                # Check if the line matches the first line of an HTTP request
                if re.match("HTTP\\/1\\.(0|1) [0-9]{1,3} .+", split):
                    httpFound = True
                    versNo = split[5:8].strip()
                    statusCode = split[9:12].strip()
                    statusName = split[13:].strip()
                    if versNo == "1.0":
                        self.mHttpVersion = 10
                    elif versNo == "1.1":
                        self.mHttpVersion = 11
                    elif versNo == "2.0":
                        self.mHttpVersion = 20
                    self.mResponseCode = int(statusCode)
                    self.mResponseMessage = statusName
                else:
                    continue
            # If we have found the first line
            else:
                # We should be able to split on ":"
                if re.match(".*:.*", split):
                    headerSplit = re.split(":", split)
                    left = headerSplit[0].strip()
                    right = ""
                    # There might be more than one ":", just concatenate
                    for i in range(1, len(headerSplit)):
                        if i == 1:
                            right = headerSplit[i]
                        else:
                            right = right + ":" + headerSplit[i]
                    right = right.strip()
                    self.mHeaders[left] = right
class HttpRequest:
    """Builds the text of an HTTP GET-style request (status line + headers)
    from a method, a parsed URL tuple, and an HTTP version flag."""

    def __init__(self, method, url, http1_1 = False):
        self.init(method, url, http1_1)

    def init(self, method, url, http1_1):
        # url is a urlparse()-style sequence: [scheme, netloc, path, ...]
        self.mHeaders = dict()
        # If the url is empty, we need to add a / for root
        if not url[2]:
            self.setUrl("/")
        else:
            self.setUrl(url[2])
        self.setHost(url[1])
        self.setMethod(str(method).strip())
        self.setHttp1_1(http1_1)

    def setHeader(self, headerName, headerValue):
        self.mHeaders[headerName] = headerValue

    def clearHeaders(self):
        self.mHeaders.clear()

    def setHost(self, host):
        self.setHeader("Host", host)

    def setUrl(self, url):
        self.mUrl = url

    def setMethod(self, method):
        self.mMethod = method

    def setHttp1_1(self, http1_1):
        if http1_1:
            self.mVersion = "1.1"
        else:
            self.mVersion = "1.0"

    def getHeaderQuery(self):
        """Return all headers as "Name: value\\r\\n" lines."""
        toRet = ""
        # FIX: dict.iteritems() was removed in Python 3; items() iterates
        # the same pairs and also works in Python 2.
        for key, value in self.mHeaders.items():
            toRet = toRet + str(key) + ": " + str(value) + "\r\n"
        return toRet

    def getIdentifierQuery(self):
        """Return the request line, e.g. "GET /path HTTP/1.0\\r\\n"."""
        toRet = ""
        toRet = toRet + str(self.mMethod) + " " + self.mUrl + " HTTP/" + self.mVersion + "\r\n"
        return toRet

    def getHttpRequest(self):
        """Return the full request text: request line, headers, blank line."""
        toRet = self.getIdentifierQuery()
        toRet = toRet + self.getHeaderQuery()
        toRet = toRet + "\r\n\r\n"
        return toRet
class HttpClient:
    """Minimal blocking HTTP GET client (Python 2 code: uses print
    statements and the py2 urlparse module).

    Fetches a URL over a raw socket, parses the response with regexes, and
    optionally follows up to 5 redirects, reporting the Server header.
    """

    def __init__(self, address, followRedirects):
        if not self.setAddress(address):
            return
        self.mInvalid = False
        self.mSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.mSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.mPort = 80
        self.mRedirectDepth = 0
        # matches a complete header (plus any body); used to detect when
        # enough bytes have arrived
        self.mHeaderCompleteRe = re.compile('HTTP\\/1\\.(0|1)(.*\\r\\n)(.*:.*\\r\\n)+\\r\\n(\\r\\n|.)*')
        # same pattern without the body tail; used to split header from body
        self.mHeaderFilterRe = re.compile('HTTP\\/1\\.(0|1)(.*\\r\\n)(.*:.*\\r\\n)+\\r\\n')
        self.mFollowRedirects = followRedirects

    def setAddress(self, address):
        """Normalize the address (prepend http:// when schemeless) and
        parse it; returns False and flags mInvalid for non-http schemes."""
        if not re.match("http(s)?:\\/\\/.*", address):
            # Add http is there is nothing in front of the url
            if not re.match(".+:\\/\\/.*", address):
                address = "http://" + address
            # If something else with an // stands before the URL we need to stop (for instance ftp://johndoe.com)
            else:
                self.mInvalid = True
                self.printResponse("Invalid url, aborting.")
                return False
        self.mAddress = address
        self.mUrl = urlparse(address)
        return True

    def open(self):
        # resolve the host and connect on port 80
        ip = socket.gethostbyname(self.mUrl[1])
        self.mSocket.connect((str(ip), self.mPort))

    def close(self):
        # close and immediately recreate the socket so the client is reusable
        self.mSocket.close()
        self.mSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.mSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    def getResponse(self, http1_1 = True):
        """Send a GET request, read until header (and Content-Length body)
        is complete, then hand off to evalResponse."""
        if self.mInvalid:
            return
        try:
            # Open the socket
            self.open()
            # Create the get request
            request = self.createRequest(self.mUrl, http1_1)
            # Get the text form
            request = request.getHttpRequest()
            # Send the request to the socket
            self.mSocket.send(request)
        except:
            print "Exception occured when sending GET request, aborting."
            return
        header = ""
        body = ""
        response = False
        headerComplete = False
        while True:
            try:
                temp = self.mSocket.recv(1024)
            except:
                print "Exception occured when receiving data, output might be incorrect."
                temp = False
            if not temp:
                break
            if headerComplete:
                body = body + str(temp)
                # stop once Content-Length bytes of body have arrived
                if len(body) >= int(response.getHeader("Content-Length")):
                    break
            else:
                header = header + temp
                if self.mHeaderCompleteRe.match(header):
                    headerComplete = True
                    # For some reason, part of the regex gets returned, so we need index 4 as body
                    body = self.mHeaderFilterRe.split(header)[4]
                    # Header is the whole data - length of body
                    if body:
                        header = header[0:len(header) - len(body)]
                    else:
                        body = ""
                    response = HttpResponse(header)
        else:
            self.printResponse('Header does not match.')
            print header
            print body
        self.close()
        self.evalResponse(response, http1_1)

    def evalResponse(self, response, http1_1):
        """Dispatch on the response class (1xx-5xx); retries with HTTP/1.1
        when a 1.0 request got a 400 from a 1.1 server."""
        if response:
            if response.getHttpVersion() == 11 and response.getResponse()[0] == 400 and not http1_1:
                self.printResponse("GET Request failed, Error 400, Bad Request, server is using HTTP/1.1, trying HTTP/1.1 request now.")
                self.getResponse(True)
            else:
                responseInfo = response.getResponse()
                responseCode = responseInfo[0]
                if responseCode >= 100 and responseCode < 200:
                    self.parse1XXResponse(response, responseCode)
                elif responseCode >= 200 and responseCode < 300:
                    self.parse2XXResponse(response, responseCode)
                elif responseCode >= 300 and responseCode < 400:
                    self.parse3XXResponse(response, responseCode)
                elif responseCode >= 400 and responseCode < 500:
                    self.parse4XXResponse(response, responseCode)
                elif responseCode >= 500 and responseCode < 600:
                    self.parse5XXResponse(response, responseCode)
                else:
                    self.printResponse("GET Request failed, got back an invalid response.")
        else:
            self.printResponse("No server information was found in the response.")

    def printGETRequestFailed(self, response):
        # I used to print the error code here because of different interpretation of assignment
        # print "GET Request failed Error " + str(response.getResponse()[0]) + ": " + response.getResponse()[1]
        self.printServerResponse(response)

    def printServerResponse(self, response):
        if "Server" in response:
            self.printResponse("Server is using: " + response.getHeader("Server"))
        else:
            self.printResponse("No server information was found in the response.")

    def printResponse(self, message):
        # indent output by the redirect depth for readability
        spaces = ""
        for i in range(0, self.mRedirectDepth):
            spaces = spaces + "    "
        print spaces + message

    def parse1XXResponse(self, response, responseCode):
        self.printGETRequestFailed(response)

    def parse2XXResponse(self, response, responseCode):
        if responseCode == 200:
            # Different interpretation of assignment
            # self.printResponse("GET Request was successful, server returned content.")
            self.printServerResponse(response)
        else:
            self.printGETRequestFailed(response)

    def parse3XXResponse(self, response, responseCode):
        """Follow Location redirects (when enabled), capped at depth 5."""
        if "Location" in response and self.mFollowRedirects:
            location = response.getHeader("Location")
            path = self.mUrl[2]
            if path:
                if location.endswith("/") and path.startswith("/"):
                    location = location + path[1:]
                else:
                    location = location + path
            self.setAddress(location)
            if self.mUrl[0] == "http":
                if self.mRedirectDepth < 5:
                    self.printResponse("Server gave an 3XX response, following redirect to: " + location)
                    self.printServerResponse(response)
                    self.mRedirectDepth = self.mRedirectDepth + 1
                    self.getResponse()
                else:
                    self.printResponse("We have been redirected 5 times, we're probably in a loop, stopped following redirects.")
                    self.printGETRequestFailed(response)
            else:
                self.printResponse("Redirect scheme is invalid (" + self.mUrl[0] + "), aborting.")
                self.printGETRequestFailed(response)
        else:
            self.printGETRequestFailed(response)

    def parse4XXResponse(self, response, responseCode):
        self.printGETRequestFailed(response)

    def parse5XXResponse(self, response, responseCode):
        self.printGETRequestFailed(response)

    def createRequest(self, address, http1_1 = False):
        request = HttpRequest("GET", address, http1_1)
        self.setParameters(request)
        return request

    def setParameters(self, request):
        request.setHeader("Accept", "text/html")
        request.setHeader("Connection", "close")
# Entry point: expects exactly one CLI argument, the URL to fetch.
if len(sys.argv) != 2:
    exit()
# Second constructor param indicates whether to follow redirects.
client = HttpClient(sys.argv[1], False)
client.getResponse()
|
import configparser,os,copy
from pathlib import Path
SERVER_SECTION= 'server'
CLIENT_SECTION= 'client'
def write_back(func):
    """Decorator: flag the shared config as dirty before delegating.

    Any method wrapped with this sets GuiConfigs.need_write_back so that a
    later GuiConfigs.save() actually flushes the file.
    """
    def inner_func(*args, **kwargs):
        GuiConfigs.need_write_back = True
        # FIX: the original discarded the wrapped function's return value;
        # pass it through so decorated methods keep their results.
        return func(*args, **kwargs)
    return inner_func
class GuiConfigs():
need_write_back=False
DICT_HOST = "dict host"
DICT_PORT = "dict port"
#HTTP_SCHEME = "dict scheme"
HTTP_HOST = "http host"
HTTP_PORT = "http port"
PROTOCOL = "protocol"
SOUND_PLAYER = 'sound player'
ZOOM_FACTOR='zoom factor'
BG_COLOR='background color'
WELCOME_WORD='welcome word'
@classmethod
def check_config_file(cls,file_path):
return os.path.exists(file_path)
    @classmethod
    def generate_init_configs(cls, file_path):
        """Write a default config file at file_path and return the path.

        Server settings go under [server]; UI settings (including the
        sound player) under [client].
        """
        # index_folder = DEFAULT_CONFIG_PATH.parent.joinpath("index")
        configs = configparser.ConfigParser()
        configs[SERVER_SECTION] = {
            cls.DICT_HOST: 'localhost',
            cls.DICT_PORT: 9999,
            cls.PROTOCOL: "http",
            cls.HTTP_HOST: "localhost",
            cls.HTTP_PORT: 8000,
        }
        configs[CLIENT_SECTION] = {
            cls.SOUND_PLAYER: "mpv",
            cls.BG_COLOR: 'white',
            cls.ZOOM_FACTOR: 1.0,
            cls.WELCOME_WORD: 'Welcome to mmDict'
        }
        with open(file_path, "w") as f:
            configs.write(f)
        return file_path
def __init__(self,file_path):
self.config_path=file_path
self.config=configparser.ConfigParser()
self.config.read(file_path)
#self.ori_config=copy.deepcopy(self.config)
def save(self):
#if self.config is self.ori_config:
# return
if not GuiConfigs.need_write_back:
return
print("config write back")
with open(self.config_path, "w") as f:
self.config.write(f)
def get_dict_server(self):
host,port= self.config[SERVER_SECTION].get(self.DICT_HOST), self.config[SERVER_SECTION].get(self.DICT_PORT)
if host and port:
return host,int(port)
else:
raise Exception("Dict host or port is not set.")
def get_http_server(self):
protocol,host,port=self.config[SERVER_SECTION].get(self.PROTOCOL),self.config[SERVER_SECTION].get(self.HTTP_HOST), self.config[SERVER_SECTION].get(self.HTTP_PORT)
if protocol and host and port:
return protocol, host,int(port)
else:
raise Exception("Http host or port is not set.")
def get_sound_player(self):
return self.config[SERVER_SECTION].get(self.SOUND_PLAYER, 'mpv')
def set_server_config(self,config_dict):
for key,val in config_dict.items():
self.set_server_value(key,val)
def set_client_config(self, config_dict):
for key, val in config_dict.items():
self.set_client_value(key, val)
def set_server_value(self,key,value):
self.config[SERVER_SECTION][key]=str(value)
def set_client_value(self,key,value):
self.config[CLIENT_SECTION][key]=str(value)
def get_server_value(self,key):
return self.config[SERVER_SECTION][key]
def get_client_value(self,key):
return self.config[CLIENT_SECTION][key]
def __set_value_both(self,key,val):
try:
self.config[CLIENT_SECTION][key]=val
except:
self.config[CLIENT_SECTION]={
key:val
}
try:
self.ori_config[CLIENT_SECTION][key]=val
except:
self.ori_config[CLIENT_SECTION]={
key:val
}
@write_back
def set_zoom_factor(self,val):
self.__set_value_both(self.ZOOM_FACTOR,val)
def get_zoom_factor(self):
try:
return float(self.config[CLIENT_SECTION][self.ZOOM_FACTOR])
except:
return 1.0
def get_bg_color(self):
try:
return self.config[CLIENT_SECTION][self.BG_COLOR]
except:
return 'white'
@write_back
def set_bg_color(self,color):
self.__set_value_both(self.BG_COLOR,color)
#def set_dicts(self,dicts:dict):
# #dict_names=[Path(x).stem for x in dict_paths]
## self.config[CONFIG_DAEMON_SECTION][self.DICT_FILED]=','.join(dicts.values())
# self.config[CONFIG_DAEMON_SECTION][self.ENABLED_FILED]=','.join(dicts.keys())
# with open(self.config_path,"w") as f:
# self.config.write(f)
# return dicts.keys()
#def add_dict(self,dict_path):
# dict_name=Path(dict_path).stem
# self.config[CONFIG_DAEMON_SECTION][self.DICT_FILED]+=f",{dict_path}"
# self.config[CONFIG_DAEMON_SECTION][self.ENABLED_FILED]+=f",{dict_name}"
# with open(self.config_path,"w") as f:
# self.config.write(f)
# return dict_name
#def get_section(self,section_name):
# if section_name in self.config.sections():
# return self.config[section_name]
# raise Exception(f"No config section {section_name}")
#def get_value(self, section, key):
# return self.config[section][key]
#def get_daemon_value(self, key):
# return self.config[CONFIG_DAEMON_SECTION][key]
#def get_frontend_value(self, key):
# return self.config[CONFIG_FRONTEND_SECTION][key]
#def get_dictionary_paths(self):
# dicts = self.get_value("dictionary daemon", "dictionaries").split(",")
## dicts=[x.strip() for x in dicts]
# index_folder=Path(self.get_daemon_value("index folder"))
# ans={}
# for path in dicts:
# path=Path(path)
# name=path.stem
# data_folder=str(index_folder.joinpath(name))
# ans[name]=[str(path),data_folder]
# return ans
#def get_enabled_dicts(self):
# try:
# dicts=self.get_daemon_value("enabled dictionaries")
# except Exception as e:
# return []
# return [x.strip() for x in dicts.split(',')] if dicts else []
if __name__ == '__main__':
    # No standalone behavior: this module is only imported by the GUI.
    pass
|
"""
Tests for the ``releases.util`` module.
These are in the integration suite because they deal with on-disk files.
"""
import os
from docutils.nodes import document
from spec import Spec, ok_, eq_
from sphinx.application import Sphinx
from releases.models import Release, Issue
from releases.util import get_doctree, parse_changelog
# Paths to the on-disk changelog fixtures exercised by the tests below.
support = os.path.join(os.path.dirname(__file__), '_support')
vanilla = os.path.join(support, 'vanilla', 'changelog.rst')
unreleased_bugs = os.path.join(support, 'unreleased_bugs', 'changelog.rst')
class get_doctree_(Spec):
    """Tests for ``releases.util.get_doctree``."""

    def obtains_app_and_doctree_from_filepath(self):
        application, tree = get_doctree(vanilla)
        # Both return values must be truthy and of the documented types.
        ok_(tree)
        ok_(application)
        ok_(isinstance(tree, document))
        ok_(isinstance(application, Sphinx))
        # The internal nodes should have been converted to Releases objects.
        entries = tree[0][2]
        ok_(isinstance(entries[0][0][0], Release))
        first_issue = entries[1][0][0]
        ok_(isinstance(first_issue, Issue))
        eq_(first_issue.type, 'bug')
        eq_(first_issue.number, '1')
class parse_changelog_(Spec):
    """Tests for ``releases.util.parse_changelog``."""

    def yields_releases_dict_from_changelog_path(self):
        parsed = parse_changelog(vanilla)
        # Expect a truthy dict keyed by version strings.
        ok_(parsed)
        ok_(isinstance(parsed, dict))
        eq_(
            set(parsed.keys()),
            {'1.0.0', '1.0.1', '1.0', 'unreleased_1_feature'},
        )
        # Only the 1.0.1 bucket carries an entry.
        eq_(len(parsed['1.0.0']), 0)
        eq_(len(parsed['unreleased_1_feature']), 0)
        eq_(len(parsed['1.0.1']), 1)
        bug = parsed['1.0.1'][0]
        eq_(bug.type, 'bug')
        eq_(bug.number, '1')
        eq_(parsed['1.0'], [])  # emptied into 1.0.1

    def unreleased_bugfixes_accounted_for(self):
        parsed = parse_changelog(unreleased_bugs)
        # Each released version holds exactly its own single bug.
        v101 = parsed['1.0.1']
        eq_(len(v101), 1)
        eq_(v101[0].number, '1')
        v110 = parsed['1.1.0']
        eq_(len(v110), 1)
        eq_(v110[0].number, '2')
        v102 = parsed['1.0.2']
        eq_(len(v102), 1)
        eq_(v102[0].number, '3')
        # The crux: the 1.0 line is empty while 1.1 still carries bug 3.
        line_10 = parsed['1.0']
        eq_(len(line_10), 0)
        line_11 = parsed['1.1']
        eq_(len(line_11), 1)
        eq_(line_11[0].number, '3')
        ok_(line_11[0] is v102[0])
|
from ..api import issue, worklog
import inject
class FindIssues(object):
    """CLI sub-command: search issues, optionally narrowed by field:value pairs."""

    def __init__(self, subparsers):
        super().__init__()
        sub = subparsers.add_parser('find',
                                    help='find issues')
        sub.add_argument('filters', metavar='filter', nargs='*')
        sub.set_defaults(cmd=self.run)

    @staticmethod
    @inject.param('render')
    def run(args, render):
        # Tokens of the form "field:value" become keyword filters; the rest
        # are passed through positionally.  split(':', 1) keeps values that
        # themselves contain ':' (e.g. URLs) intact -- the original plain
        # split() raised ValueError on such input.
        filters = dict(f.split(':', 1) for f in args.filters if ':' in f)
        # Named `terms` instead of shadowing the `args` parameter.
        terms = [f for f in args.filters if ':' not in f]
        issues = issue.find(*terms, **filters)
        rendered = render(
            issues,
            mapping=[
                ('issue', 'key'),
                ('status', 'fields.status.name'),
                ('summary', 'fields.summary'),
            ]
        )
        print(rendered)
class MyIssues(object):
    """CLI sub-command: list the current user's unresolved issues."""

    def __init__(self, subparsers):
        super().__init__()
        parser = subparsers.add_parser('my',
                                       help='show user issues')
        parser.set_defaults(cmd=self.run)

    @staticmethod
    @inject.param('config')
    @inject.param('render')
    def run(args, render, config):
        # Everything assigned to the configured user that is still open.
        mine = issue.find(assignee=config.jira.username,
                          resolution='unresolved')
        output = render(
            mine,
            mapping=[
                ('issue', 'key'),
                ('status', 'fields.status.name'),
                ('summary', 'fields.summary'),
            ]
        )
        print(output)
class ShowIssue(object):
    """CLI sub-command: display one issue's details."""

    def __init__(self, subparsers):
        super().__init__()
        parser = subparsers.add_parser('show',
                                       help='show an issue')
        parser.add_argument('issue_id')
        parser.set_defaults(cmd=self.run)

    @staticmethod
    @inject.param('render')
    def run(args, render):
        details = issue.get(args.issue_id)
        print(render(details, mapping=[
            ('issue', 'key'),
            ('summary', 'fields.summary'),
            ('status', 'fields.status.name'),
            ('issue type', 'fields.issuetype.name'),
            ('reporter', 'fields.reporter.displayName'),
            ('assignee', 'fields.assignee.displayName'),
        ]))
class Resolve(object):
    """CLI sub-command: transition an issue to resolved, then show it."""

    def __init__(self, subparsers):
        super().__init__()
        parser = subparsers.add_parser('resolve',
                                       help='resolve an issue')
        parser.add_argument('issue_id')
        parser.set_defaults(cmd=self.run)

    @staticmethod
    @inject.param('render')
    def run(args, render):
        # Apply the workflow transition first, then re-fetch and display.
        issue.transition(args.issue_id, 'Resolve Issue')
        refreshed = issue.get(args.issue_id)
        print(render(refreshed, mapping=[
            ('issue', 'key'),
            ('summary', 'fields.summary'),
            ('status', 'fields.status.name'),
            ('issue type', 'fields.issuetype.name'),
            ('reporter', 'fields.reporter.displayName'),
            ('assignee', 'fields.assignee.displayName'),
        ]))
class Grab(object):
    """CLI sub-command: assign an issue to the current user, then show it."""

    def __init__(self, subparsers):
        super().__init__()
        parser = subparsers.add_parser('grab',
                                       help='grab an issue')
        parser.add_argument('issue_id')
        parser.set_defaults(cmd=self.run)

    @staticmethod
    @inject.param('render')
    @inject.param('config')
    def run(args, render, config):
        # Take ownership first, then re-fetch and display the updated issue.
        issue.assign(args.issue_id, config.jira.username)
        refreshed = issue.get(args.issue_id)
        print(render(refreshed, mapping=[
            ('issue', 'key'),
            ('summary', 'fields.summary'),
            ('status', 'fields.status.name'),
            ('issue type', 'fields.issuetype.name'),
            ('reporter', 'fields.reporter.displayName'),
            ('assignee', 'fields.assignee.displayName'),
        ]))
class Log(object):
    """CLI sub-command: show worklog entries and optionally add one."""

    def __init__(self, subparsers):
        super().__init__()
        parser = subparsers.add_parser('log',
                                       help='shows and add worklog entries')
        parser.add_argument('issue_id')
        parser.add_argument('-t', '--time', help='a worklog entry (?h ?m)')
        parser.add_argument('-c', '--comment', help='adds comment to the entry')
        parser.set_defaults(cmd=self.run)

    @staticmethod
    @inject.param('render')
    @inject.param('config')
    def run(args, render, config):
        # A comment only makes sense when attached to a new time entry.
        if args.comment and not args.time:
            print('Adding a timelog comment requires setting --time')
            return
        if args.time:
            worklog.add(args.issue_id,
                        comment=args.comment,
                        timeSpent=args.time)
        # Always end by listing every worklog entry for the issue.
        print(render(worklog.all(args.issue_id), mapping=[
            ('author', 'author.displayName'),
            ('date', 'started'),
            ('time spent', 'timeSpent'),
            ('comment', 'comment'),
        ]))
|
import importlib
import sys
""" Rudimentary test runner for plugins
Pass in the plugin name as an argument, and make sure that there is a test.py file with a run() function in the plugin
directory.
"""
plugin = sys.argv[1]
test = importlib.import_module('plugins.' + plugin + '.test')
test.run()
|
class Key:
    """Holds the third-party API key.

    SECURITY NOTE: the key used to be hard-coded in source.  It can now be
    overridden via the ``API_KEY`` environment variable; the hard-coded
    value remains only as a backward-compatible fallback and should be
    rotated and removed from version control.
    """

    def __init__(self):
        import os  # local import: this module has no import section of its own
        self.api_key = os.environ.get('API_KEY', '1VTP01G4K4XHMPAB')
from math import *
import numba
import numpy as np
import sympy
from scipy.special import gamma
from sympy.utilities.lambdify import lambdastr
import golf_course.estimate.numba as nestimate
class Target(object):
    """
    Target
    A class that describes regions around important points in the energy function.
    Outside these regions, the energy function would simply be a constant.
    """
    def __init__(self, center, radiuses, energy_type, energy_params):
        """__init__
        For 'well', there's only one parameter, 'depth'. We are going to use a quadratic energy function for this.
        For 'crater', there're two parameters, 'depth' and 'height'. See the info in the notes about the energy
        function. We will use a fourth-order polynomial. For 'random_well', there are three parameters, 'depth'
        for the well energy, 'locations' for the locations of the Gaussian bumps, and 'standard_deviations' for
        the standard_deviations of each multivariate Gaussian. For each Gaussian bumps, we would start by assuming
        all the dimensions are independent of each other. But for each dimension, we can have a different
        standard_deviation params['locations'] is an np array of shape (num_loc, n), where num_loc is the number
        of Gaussian bumps we are going to put down, and n is the dimension of the system.
        params['standard_deviations'] is also an np array of shape (num_loc, n). Here, each row holds the
        standard_deviations of all those dimensions. As a result, diag(params['standard_deviations'][i, :]**2) would
        be the covariance matrix for the ith Gaussian bump. For 'random_crater', there are four parameters. 'depth',
        'height', 'locations', and 'standard_deviations'. Refer to the above comments for the meaning of these.
        Parameters
        ----------
        center: np.array
            The location of the center for this Target. Length of the array is the dimension of the system
        radiuses: np.array
            The radiuses of the three spheres involved. We should have radiuses[0]<radiuses[1]<radiuses[2],
        energy_type: str
            energy_type is the name of the type of energy we are going to use within the middle sphere.
            Allowed energy types include 'random_well' and 'random_crater'. 'random_well' and
            'random_crater' are two random energy functions. The way to define them is, we first
            randomly pick some locations, and put down some Gaussian bumps at those locations.
            We then multiply this energy function by either the the well energy or the crater energy,
            to get random_well and random crater.
        energy_params: dict
            The parameters for the specific energy type that we are using
        Returns
        -------
        """
        # Three nested sphere radii, strictly increasing.
        assert len(radiuses) == 3
        assert (
            radiuses[0] < radiuses[1] and radiuses[1] < radiuses[2]
        ), 'Wrong radiuses.'
        assert energy_type in set(['random_well', 'random_crater', 'flat'])
        self.energy_type = energy_type
        self.center = center
        self.radiuses = radiuses
        self.energy_params = energy_params
        # Build and JIT-compile the force-field closure used by simulation.
        self.generate_force_field_function()
    def generate_force_field_function(self):
        # 'flat' targets exert no force; otherwise build the negative
        # gradient of the symbolic energy expression for this target type.
        if self.energy_type == 'flat':
            get_force_field = lambda x: list(np.zeros_like(x))
        else:
            expr_generation_func_dict = {
                'random_well': generate_random_well_sympy_expr,
                'random_crater': generate_random_crater_sympy_expr,
            }
            location, gradient_expr = expr_generation_func_dict[self.energy_type](
                self.center, self.radiuses, **self.energy_params
            )
            # Turn the sympy gradient into lambda source text, then rewrite
            # its n scalar parameters (x0..x{n-1}) into a single vector
            # argument `x` with indexing so numba can compile it.
            force_field_lambda_str = lambdastr(location, -gradient_expr)
            n_dim = len(location)
            old_argument = ','.join(['x{}'.format(ii) for ii in range(n_dim)])
            force_field_lambda_str = force_field_lambda_str.replace(old_argument, 'x')
            for ii in range(n_dim):
                force_field_lambda_str = force_field_lambda_str.replace(
                    'x{}'.format(ii), 'x[{}]'.format(ii)
                )
            # NOTE: eval of program-generated (not user-supplied) source.
            get_force_field = eval(force_field_lambda_str)
            get_force_field = numba.jit(get_force_field)
        # Brownian-dynamics stepper; closes over get_force_field above.
        @numba.jit(nopython=True, cache=True)
        def advance_within_concentric_spheres_numba(
            current_location,
            center,
            r1,
            boundary_radiuses,
            time_step,
            reflecting_boundary_radius,
        ):
            origin = np.zeros_like(current_location)
            n_dim = center.size
            inner_boundary_squared = boundary_radiuses[0] ** 2
            outer_boundary_squared = boundary_radiuses[1] ** 2
            r1_squared = r1 ** 2
            # Noise amplitude: sqrt(dt) scaling of the Wiener increment.
            scale = np.sqrt(time_step)
            previous_location = current_location
            target_flag = False
            while True:
                r_vector = current_location - center
                r_squared = np.sum(r_vector ** 2)
                if r_squared <= inner_boundary_squared:
                    # Reached the inner sphere: success.
                    target_flag = True
                    break
                elif r_squared >= outer_boundary_squared:
                    # Escaped through the outer sphere: stop.
                    break
                # Force is only non-zero inside radius r1.
                if r_squared >= r1_squared:
                    force_field = np.zeros_like(current_location)
                else:
                    force_field = np.array(get_force_field(current_location))
                previous_location = current_location
                random_component = scale * np.random.randn(n_dim)
                # Euler-Maruyama step: drift + diffusion.
                current_location = (
                    previous_location + force_field * time_step + random_component
                )
                # Reflect off the global boundary sphere around the origin.
                current_location = nestimate.simulate_reflecting_boundary(
                    origin,
                    reflecting_boundary_radius,
                    previous_location,
                    current_location,
                    scale,
                    time_step,
                    force_field,
                )
            return previous_location, current_location, target_flag
        self.advance_within_concentric_spheres_numba = (
            advance_within_concentric_spheres_numba
        )
    def get_constant(self):
        # Surface area of the unit (n-1)-sphere: 2*pi^(n/2) / Gamma(n/2).
        n_dim = self.center.size
        constant = 2 * np.pi ** (n_dim / 2) / gamma(n_dim / 2)
        # NOTE(review): when the middle sphere extends outside the unit ball
        # this looks like a partial-coverage correction; confirm the
        # arccos(radius/2) factor against the derivation.
        if np.linalg.norm(self.center) + self.radiuses[1] > 1:
            constant *= np.arccos(self.radiuses[1] / 2) / np.pi
        return constant
def generate_random_well_sympy_expr(
    center,
    radiuses,
    depth=None,
    locations=None,
    standard_deviations=None,
    multiplier=None,
):
    """generate_random_well_sympy_expr
    Build the symbolic gradient of a "random well" energy: a quartic well of
    the given depth, plus mollified Gaussian bumps at the given locations.
    Parameters
    ----------
    center :
    radiuses :
    depth : float
        The depth of the potential well
    locations : np.array
        The locations of all the Gaussian random bumps
    standard_deviations : np.array
        The standard deviations for the different Gaussian random bumps
    multiplier : float
        The multiplier used to balance the main and random parts of the energy function
    Returns
    -------
    """
    assert locations.shape[1] == center.size
    assert standard_deviations.shape == (locations.shape[0],)
    n_dim = center.size
    n_bumps = locations.shape[0]
    # Symbolic coordinate vector x0..x{n-1} and the (symbolic) center.
    location = sympy.Array(sympy.symbols('x:{}'.format(n_dim)), (n_dim,))
    center = sympy.Array(center, center.shape)
    r_squared = sympy_array_squared_norm(location - center)
    # Quartic well: minimum -depth at the center, zero at radiuses[1].
    well_expr = (
        -(depth / radiuses[1] ** 4)
        * (r_squared ** 2 - 2 * radiuses[1] ** 2 * r_squared)
        - depth
    )
    # Smooth cutoff so the random bumps vanish at the middle sphere.
    mollifier_expr = sympy.functions.exp(
        -radiuses[1] / (radiuses[1] - r_squared ** 10)
    ) / np.exp(-1)
    # One isotropic Gaussian bump per requested location.
    random_components = [
        sympy.functions.exp(
            -sympy_array_squared_norm(location - sympy.Array(locations[i], (n_dim,)))
            / (2 * standard_deviations[i] ** 2)
        )
        for i in range(n_bumps)
    ]
    random_expr = sum(random_components)
    sympy_expr = well_expr + multiplier * mollifier_expr * random_expr
    gradient_expr = sympy.derive_by_array(sympy_expr, location)
    return location, gradient_expr
def generate_random_crater_sympy_expr(
    center,
    radiuses,
    depth=None,
    height=None,
    locations=None,
    standard_deviations=None,
    multiplier=None,
):
    """generate_random_crater_sympy_expr
    Build the symbolic gradient of a "random crater" energy: a sextic crater
    profile plus mollified Gaussian bumps at the given locations.
    Parameters
    ----------
    center :
    radiuses :
    depth : float
        The depth of the crater
    height : float
        The height of the crater
    locations : np.array
        The locations of all the Gaussian random bumps
    standard_deviations : np.array
        The standard deviations for the different Gaussian random bumps
    multiplier : float
        The multiplier used to balance the main and the random parts of the energy function
    Returns
    -------
    """
    assert locations.shape[1] == center.size
    assert standard_deviations.shape == (locations.shape[0],)
    n_dim = center.size
    n_bumps = locations.shape[0]
    # Symbolic coordinate vector x0..x{n-1} and the (symbolic) center.
    location = sympy.Array(sympy.symbols('x:{}'.format(n_dim)), (n_dim,))
    center = sympy.Array(center, center.shape)
    r_squared = sympy_array_squared_norm(location - center)
    # C, Delta0, b_squared, a: closed-form coefficients for the sextic
    # crater profile so it attains the requested depth and height.
    # NOTE(review): derived offline (cubic-root formula); verify against the
    # accompanying notes before changing.
    C = (
        3
        * radiuses[1] ** 2
        * sympy.cbrt(depth * height * (depth + sympy.sqrt(depth * (depth + height))))
    )
    Delta0 = -9 * depth * height * radiuses[1] ** 4
    b_squared = -(1 / (3 * depth)) * (-3 * depth * radiuses[1] ** 2 + C + Delta0 / C)
    a = depth / (3 * b_squared * radiuses[1] ** 4 - radiuses[1] ** 6)
    crater_expr = (
        a
        * (
            2 * r_squared ** 3
            - 3 * (b_squared + radiuses[1] ** 2) * r_squared ** 2
            + 6 * b_squared * radiuses[1] ** 2 * r_squared
        )
        - depth
    )
    # Smooth cutoff so the random bumps vanish at the middle sphere.
    mollifier_expr = sympy.functions.exp(
        -radiuses[1] / (radiuses[1] - sympy_array_squared_norm(location - center) ** 10)
    ) / np.exp(-1)
    # One isotropic Gaussian bump per requested location.
    random_components = [
        sympy.functions.exp(
            -sympy_array_squared_norm(location - sympy.Array(locations[ii], (n_dim,)))
            / (2 * standard_deviations[ii] ** 2)
        )
        for ii in range(n_bumps)
    ]
    random_expr = 0
    for ii in range(n_bumps):
        random_expr += random_components[ii]
    sympy_expr = crater_expr + multiplier * mollifier_expr * random_expr
    gradient_expr = sympy.derive_by_array(sympy_expr, location)
    return location, gradient_expr
def sympy_array_squared_norm(sympy_array):
    """Return the sum of squared entries of a rank-1 sympy Array."""
    squared = sympy_array.applyfunc(lambda entry: entry ** 2)
    # Contracting over axis 0 sums the squared components.
    return sympy.tensor.array.tensorcontraction(squared, (0,))
|
from unittest.mock import AsyncMock, MagicMock, PropertyMock
import pytest
import gidgethub
import abstracts
from aio.api import github
from aio.api import github as base_github
@abstracts.implementer(github.AGithubIssue)
class DummyGithubIssue:
    # Minimal concrete implementer used to exercise the abstract issue class.
    pass
@abstracts.implementer(github.AGithubIssues)
class DummyGithubIssues:
    # Minimal concrete implementer used to exercise the abstract issues class.
    pass
def test_abstract_issue_constructor(patches):
    # The constructor must delegate straight to GithubRepoEntity.__init__,
    # and __str__ must render "<ClassName repo#number>".
    args = tuple(f"ARG{i}" for i in range(0, 3))
    kwargs = {f"K{i}": f"V{i}" for i in range(0, 3)}
    patched = patches(
        "GithubRepoEntity.__init__",
        prefix="aio.api.github.abstract.issues")
    with patched as (m_super, ):
        m_super.return_value = None
        issue = DummyGithubIssue(*args, **kwargs)
        assert isinstance(issue, github.abstract.base.GithubRepoEntity)
        assert (
            m_super.call_args
            == [args, kwargs])
        # __str__ combines the repo name and the issue number.
        issue.repo = MagicMock()
        issue.number = 23
        assert (
            str(issue)
            == f"<{issue.__class__.__name__} {issue.repo.name}#23>")
@pytest.mark.parametrize("number", range(0, 3))
@pytest.mark.parametrize("other_number", range(0, 3))
def test_issue_dunder_gt(number, other_number):
issue1 = DummyGithubIssue("REPO", "DATA")
issue1.number = number
issue2 = DummyGithubIssue("REPO", "DATA")
issue2.number = other_number
assert (issue1 > issue2) == (number > other_number)
@pytest.mark.parametrize("number", range(0, 3))
@pytest.mark.parametrize("other_number", range(0, 3))
def test_issue_dunder_lt(number, other_number):
issue1 = DummyGithubIssue("REPO", "DATA")
issue1.number = number
issue2 = DummyGithubIssue("REPO", "DATA")
issue2.number = other_number
assert (issue1 < issue2) == (number < other_number)
async def test_abstract_issue_close(patches):
    # close() must be implemented as edit(state="closed").
    issue = DummyGithubIssue("REPO", "DATA")
    patched = patches(
        "AGithubIssue.edit",
        prefix="aio.api.github.abstract.issues")
    with patched as (m_edit, ):
        assert (
            await issue.close()
            == m_edit.return_value)
        assert (
            m_edit.call_args
            == [(), dict(state="closed")])
async def test_abstract_issue_comment():
    # comment() must POST the body to issues/<number>/comments on the repo.
    repo = MagicMock()
    repo.post = AsyncMock()
    issue = DummyGithubIssue(repo, "DATA")
    issue.number = 23
    assert (
        await issue.comment("COMMENT")
        == repo.post.return_value)
    assert (
        repo.post.call_args
        == [("issues/23/comments", ),
            dict(data=dict(body="COMMENT"))])
@pytest.mark.parametrize(
    "kwargs", [{}, {f"K{i}": f"V{i}" for i in range(0, 3)}])
async def test_abstract_issue_edit(patches, kwargs):
    # edit() must PATCH issues/<number> with the kwargs as payload and wrap
    # the response in a fresh AGithubIssue.
    repo = MagicMock()
    repo.patch = AsyncMock()
    issue = DummyGithubIssue(repo, "DATA")
    issue.number = 23
    patched = patches(
        "AGithubIssue.__init__",
        prefix="aio.api.github.abstract.issues")
    with patched as (m_init, ):
        m_init.return_value = None
        result = await issue.edit(**kwargs)
        assert isinstance(result, github.AGithubIssue)
        assert (
            m_init.call_args
            == [(repo, repo.patch.return_value), {}])
        assert (
            repo.patch.call_args
            == [("issues/23", ), dict(data=kwargs)])
@pytest.mark.parametrize("repo", [None, "REPO"])
@pytest.mark.parametrize("filter", [None, "FILTER"])
def test_abstract_issues_constructor(repo, filter):
args = (
(repo, )
if repo
else ())
kwargs = {}
if filter:
kwargs["filter"] = filter
issues = DummyGithubIssues("GITHUB", *args, **kwargs)
assert issues.github == "GITHUB"
assert issues.repo == repo
assert issues._filter == (filter or "")
@pytest.mark.parametrize("repo", [None, "REPO"])
@pytest.mark.parametrize("filter", [None, "FILTER"])
def test_abstract_issues_filter(repo, filter):
repo = MagicMock() if repo else None
args = (
(repo, )
if repo
else ())
kwargs = {}
if filter:
kwargs["filter"] = filter
issues = DummyGithubIssues("GITHUB", *args, **kwargs)
filter_parts = []
if filter:
filter_parts.append(filter)
if repo:
filter_parts.append(f"repo:{repo.name}")
filters = " ".join(filter_parts)
assert (
issues.filter
== (f"{filters} " if filters else ""))
@pytest.mark.parametrize("repo1", [None, "REPO1"])
@pytest.mark.parametrize("repo2", [None, "REPO2"])
@pytest.mark.parametrize(
"raises", [None, Exception, gidgethub.GitHubException])
async def test_abstract_issues_create(repo1, repo2, raises):
github = MagicMock()
kwargs = {f"K{i}": f"V{i}" for i in range(0, 3)}
repo1 = (
MagicMock()
if repo1
else None)
repo2 = (
MagicMock()
if repo2
else None)
args1 = (
(repo1, )
if repo1
else ())
data_kwargs = dict(data=kwargs.copy())
data_kwargs["data"]["title"] = "ISSUE_TITLE"
if repo2:
kwargs["repo"] = repo2
repo = repo2 or repo1
issues = DummyGithubIssues(github, *args1)
if not repo:
with pytest.raises(base_github.exceptions.IssueCreateError) as e:
await issues.create("ISSUE_TITLE", **kwargs)
assert not github.issue_class.called
assert (
e.value.args[0]
== ("To create an issue, either `DummyGithubIssues` must be "
"instantiated with a `repo` or `create` must be called with "
"one."))
return
repo.post = AsyncMock()
if raises:
repo.post.side_effect = raises("BOOM!")
error = (
base_github.exceptions.IssueCreateError
if raises != Exception
else raises)
with pytest.raises(error) as e:
await issues.create("ISSUE_TITLE", **kwargs)
if raises != Exception:
assert (
e.value.args[0]
== ("Failed to create issue 'ISSUE_TITLE' in "
f"{repo.name}\nRecieved: BOOM!"))
assert not github.issue_class.called
assert (
repo.post.call_args
== [('issues',), data_kwargs])
else:
assert (
await issues.create("ISSUE_TITLE", **kwargs)
== github.issue_class.return_value)
assert (
github.issue_class.call_args
== [(repo, repo.post.return_value), {}])
assert (
repo.post.call_args
== [('issues',), data_kwargs])
@pytest.mark.parametrize("repo1", [None, "REPO1"])
@pytest.mark.parametrize("repo2", [None, "REPO2"])
def test_abstract_issues_inflater(patches, repo1, repo2):
github = MagicMock()
args1 = (
(repo1, )
if repo1
else ())
args2 = (
(repo2, )
if repo2
else ())
repo = repo2 or repo1
issues = DummyGithubIssues(github, *args1)
patched = patches(
"partial",
"AGithubIssues._inflate",
prefix="aio.api.github.abstract.issues")
with patched as (m_partial, m_inflate):
assert (
issues.inflater(*args2)
== (m_inflate
if not repo
else m_partial.return_value))
if not repo:
assert not m_partial.called
return
assert (
m_partial.call_args
== [(github.issue_class, repo), {}])
@pytest.mark.parametrize("repo", [None, "REPO"])
def test_abstract_issues_search(patches, repo):
github = MagicMock()
args = (
(repo, )
if repo
else ())
issues = DummyGithubIssues(github)
patched = patches(
"AGithubIssues.inflater",
"AGithubIssues.search_query",
prefix="aio.api.github.abstract.issues")
with patched as (m_inflater, m_query):
assert (
issues.search("QUERY", *args)
== github.getiter.return_value)
assert (
github.getiter.call_args
== [(m_query.return_value, ),
dict(inflate=m_inflater.return_value)])
assert (
m_query.call_args
== [("QUERY", ), {}])
assert (
m_inflater.call_args
== [(repo, ), {}])
def test_abstract_issues_search_query(patches):
    # search_query() URL-quotes "<filter><query>" into /search/issues?q=...
    issues = DummyGithubIssues("GITHUB")
    patched = patches(
        "urllib",
        ("AGithubIssues.filter",
         dict(new_callable=PropertyMock)),
        prefix="aio.api.github.abstract.issues")
    with patched as (m_url, m_filter):
        assert (
            issues.search_query("QUERY")
            == f"/search/issues?q={m_url.parse.quote.return_value}")
        assert (
            m_url.parse.quote.call_args
            == [(f"{m_filter.return_value}QUERY", ), {}])
def test_abstract_issues__inflate():
    # _inflate() resolves the repo from repository_url and constructs an
    # issue_class around the raw result dict.
    github = MagicMock()
    issues = DummyGithubIssues(github)
    result = dict(foo="BAR", repository_url="URL")
    assert (
        issues._inflate(result)
        == github.issue_class.return_value)
    assert (
        github.issue_class.call_args
        == [(github.repo_from_url.return_value, result), {}])
    assert (
        github.repo_from_url.call_args
        == [("URL", ), {}])
|
"""
A watchdog timer
"""
import machine
class WatchDogTimer(object):
    """One-shot watchdog: reboots the board unless reset() is called in time."""

    def __init__(self, period, timer_id=-1):
        # period: milliseconds until the board is rebooted.
        self.period = period
        self.timer = machine.Timer(timer_id)
        self._start()

    def _start(self):
        # Arm a one-shot timer that fires _reboot after `period`.
        self.timer.init(
            period=self.period,
            mode=machine.Timer.ONE_SHOT,
            callback=self._reboot
        )

    def _stop(self):
        # Disarm the pending timer.
        self.timer.deinit()

    def _reboot(self, _timer):
        # Timer callback; the argument is the Timer instance (unused).
        machine.reset()

    def reset(self):
        """Kick the watchdog: disarm and immediately re-arm the timer."""
        self._stop()
        self._start()
|
import inspect
import pickle
import cv2
import numpy as np
from ColorComponents import ColorComponents
from HOGExtractor import HOGExtractor
from HistExtractor import HistExtractor
from SpacialExtractor import SpacialExtractor
class FeatureCombiner:
    """Concatenates the outputs of several feature extractors per image."""

    def __init__(self, extractors, pickle_path=None):
        """
        Parameters
        ----------
        extractors : sequence
            Extractor objects exposing ``extract(img, color_components)``.
        pickle_path : str, optional
            When given, the combiner pickles itself to this path so it can
            later be restored with ``from_pickle``.
        """
        # Explicit assignment replaces the original inspect-based trick,
        # which also created a useless self.self reference cycle.
        self.extractors = extractors
        self.pickle_path = pickle_path
        if pickle_path is not None:
            # Context manager closes the handle; the original leaked the
            # file object returned by open().
            with open(pickle_path, "wb") as f:
                pickle.dump({"combiner": self}, f)

    @classmethod
    def from_pickle(cls, pickle_path):
        """Restore a combiner previously persisted by __init__."""
        with open(pickle_path, mode='rb') as f:
            combiner = pickle.load(f)
        return combiner['combiner']

    def from_dataset(self, dataset):
        """Return (car_features, notcar_features) for a dataset exposing .cars/.notcars file lists."""
        cars_features = self.from_files(dataset.cars)
        notcars_features = self.from_files(dataset.notcars)
        return cars_features, notcars_features

    def from_files(self, files):
        """Read each image file with OpenCV and extract its combined feature vector."""
        return self.from_images(cv2.imread(file) for file in files)

    def from_images(self, images):
        """Return one combined feature vector per image."""
        return [FeatureCombiner.combine(image, self.extractors) for image in images]

    @classmethod
    def combine(cls, img, extractors):
        """Concatenate the features produced by every extractor for one image."""
        # Color components are computed once and shared by all extractors.
        ccomponents = ColorComponents(img)
        extracts = [cls.extract(ccomponents, extractor) for extractor in extractors]
        return np.concatenate(extracts)

    @classmethod
    def extract(cls, ccomponents, extractor):
        """Run a single extractor on precomputed color components."""
        return extractor.extract(None, ccomponents)
if __name__ == '__main__':
    # Build and persist the default combiner (spatial + histogram + HOG).
    combiner = FeatureCombiner([SpacialExtractor(), HistExtractor(), HOGExtractor()],
                               pickle_path="./dataset/combiner.p")
|
from django.views.generic import TemplateView, FormView
from .readopensecrets import read_contributions, read_contibutors
from .readpolitifact import read_recent_statements
from .fivethirtyeight import getPollNum
from django.http.response import HttpResponseRedirect
from django.shortcuts import reverse
from .forms import IssueForm
class Home(FormView):
    """Landing page: shows top contributors and accepts an issue selection."""
    template_name = 'home.html'
    form_class = IssueForm

    def get(self, request):
        # BUG FIX: the helper is imported as `read_contibutors` [sic]; the
        # original called the nonexistent name `read_contributors`, which
        # raised NameError on every GET.
        self.request.session["top_contribs"] = {
            "hillary_contribs": read_contibutors("hillary-clinton"),
            "gary_contribs": read_contibutors("gary-johnson"),
            "donald_contribs": read_contibutors("donald-trump")
        }
        return super(Home, self).get(request)

    def form_valid(self, form):
        """Collect OpenSecrets / Politifact / 538 data for the chosen issue and redirect."""
        topic = form.cleaned_data.get("issue")
        # Get the OpenSecrets data on the given topic
        self.request.session["os_data"] = {
            "hillary_os": read_contributions("hillary-clinton", topic),
            "gary_os": read_contributions("gary-johnson", topic),
            "donald_os": read_contributions("donald-trump", topic),
        }
        # Get the Politifact data on the given topic
        self.request.session["pf_data"] = {
            "hillary_pf": read_recent_statements("hillary-clinton", topic),
            "gary_pf": read_recent_statements("gary-johnson", topic),
            "donald_pf": read_recent_statements("donald-trump", topic),
        }
        # Get polling data from 538 (leftover debug print removed).
        self.request.session["polling_data"] = {
            "hillary_polling": getPollNum("hillary-clinton"),
            "gary_polling": getPollNum("gary-johnson"),
            "donald_polling": getPollNum("donald-trump")
        }
        return HttpResponseRedirect(reverse('results'))
class Results(TemplateView):
    # Renders the comparison data that Home.form_valid stored in the session.
    template_name = 'results.html'
|
# coding=utf-8
"""
サーバー
"""
import datetime
import json
import logging
import os
import pathlib
import Mykytea
import flask
import flask_api.status
import flask_classy
import configs
# Author
__author__ = 'Masaya Suzuki'
# Version
__version__ = '0.1.6'
# Configuration (loaded from ../configs relative to the working directory)
conf = configs.Config(pathlib.Path.cwd().parent / 'configs')
# Flask app serving the front-end from the configured static URL/directory.
app = flask.Flask(__name__, conf.get('general', 'front', 'url'), conf.get('general', 'front', 'dir path'))
def output_http_data(headers, body):
    """Log the headers and body of an HTTP request/response at DEBUG level.

    :param headers: header items of the HTTP request or response
    :param body: body of the HTTP request or response
    """
    app.logger.debug('[Header]')
    for name_value in headers:
        app.logger.debug('{}: {}'.format(*name_value))
    dumped = json.dumps(body, indent=4, ensure_ascii=False, sort_keys=True)
    app.logger.debug(os.linesep.join(['[Data]', dumped]))
def convert_tag(tag):
    """Map a raw KyTea tag to its response representation.

    '0' means "no tag" (empty string) and 'UNK' means the tag is unknown;
    any other tag is passed through unchanged.

    :param tag: tag emitted by KyTea
    :return: tag formatted for the response
    """
    special = {'0': '', 'UNK': '(Unknown)'}
    return special.get(tag, tag)
@app.route('/')
def index():
    """
    Serve the top page.
    :return: the static index.html
    """
    app.logger.debug('/ called!')
    return app.send_static_file('index.html')
class KyTeaView(flask_classy.FlaskView):
    """
    View returning KyTea morphological-analysis results as JSON.
    """
    # Register routes without a trailing slash (flask_classy option).
    trailing_slash = False
    def __init__(self):
        # KyTea analyzer with default options.
        # NOTE(review): flask_classy may instantiate the view per request,
        # paying the model-load cost each time -- confirm.
        self.kytea = Mykytea.Mykytea('')
    def _make_responce(self, request):
        """
        Build the response payload.
        :param request: request body text to analyze
        :return: list of {'word', 'pos', 'pronunciation'} dicts
        """
        # Response accumulator ("responce" spelling kept from the original).
        responce = list()
        # Run KyTea on the stripped input and convert each analyzed token.
        for word_data in self.kytea.getAllTags(request.strip()):
            word_data.surface = word_data.surface.strip()
            if word_data.surface:
                responce.append({'word': word_data.surface,
                                 'pos': convert_tag(word_data.tag[0][0][0]),
                                 'pronunciation': [{'margin': margin, 'pronunciation': convert_tag(tag)}
                                                   for tag, margin in word_data.tag[1]]})
        return responce
    def post(self):
        # Analyze the posted text and return the analysis as JSON.
        try:
            app.logger.debug('/kytea/ called!')
            # Request body as text
            request = flask.request.get_data(as_text=True)
            app.logger.debug('<Request>')
            output_http_data(flask.request.headers, request)
            response = flask.jsonify(self._make_responce(request))
            response.status_code = flask_api.status.HTTP_200_OK
            # Allow cross-origin access from the front-end.
            response.headers['Access-Control-Allow-Origin'] = '*'
            app.logger.debug('<Response>')
            app.logger.debug('[Status]')
            app.logger.debug(response.status)
            output_http_data(response.headers, response.json)
            return response
        except Exception as e:
            # Any failure becomes a 500 after being logged with traceback.
            app.logger.exception(e)
            flask.abort(flask_api.status.HTTP_500_INTERNAL_SERVER_ERROR)
if __name__ == '__main__':
    # Set the root logger's level to DEBUG
    logging.root.setLevel(logging.DEBUG)
    # Attach a console handler and a timestamped log-file handler
    for handler in [logging.StreamHandler(),
                    logging.FileHandler(str(pathlib.Path(conf.get('general', 'log', 'path'))
                                            / (datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + '.log')))]:
        handler.setLevel(logging.root.getEffectiveLevel())
        handler.setFormatter(logging.Formatter('[%(name)s %(asctime)s %(levelname)s] %(message)s'))
        logging.root.addHandler(handler)
    # NOTE(review): registration only happens when run as a script, so the
    # KyTea routes would be missing under a WSGI server — confirm intended.
    KyTeaView.register(app)
# NOTE(review): duplicated __main__ guard; could be merged with the one above.
if __name__ == '__main__':
    app.run(conf.get('general', 'server', 'host'), conf.get('general', 'server', 'port'), True, use_reloader=False)
|
from tkinter import*
import random
import time
# Main application window
root = Tk()
root.geometry("1100x700+150+5")
root.title("Restaurant Billing System")
# Top banner frame
Tops = Frame(root,width = 900,height=50)
Tops.pack(side=TOP)
# Main content frame (entries, totals, buttons)
f1 = Frame(root,width=1600,height=700)
f1.pack(side=LEFT)
#------------------TIME--------------
# Timestamp captured once at startup and shown in the banner
localtime=time.asctime(time.localtime(time.time()))
#-----------------INFO TOP------------
lblinfo = Label(Tops, font=( 'aria' ,30, 'bold' ),text="Restaurant Billing System",fg="steel blue",bd=45)
lblinfo.grid(row=0,column=0)
lblinfo = Label(Tops, font=( 'aria' ,20, ),text=localtime,fg="steel blue")
lblinfo.grid(row=1,column=0)
#---------------ALGEBRAIC CALCULATION------------------
# NOTE(review): text_Input and operator are never used below
text_Input=StringVar()
operator =""
def Ref():
    """Generate an order number and compute all bill totals.

    Reads the quantity StringVars (Fries, Largefries, Burger, Filet,
    Cheese_burger, Drinks), multiplies each by its menu price, and writes
    the subtotal, service charge, tax and total into the display StringVars.
    """
    def _qty(var):
        # Blank entries (e.g. right after RESET) count as zero instead of
        # raising ValueError.
        value = var.get()
        return float(value) if value else 0.0

    # pseudo-random order number shown next to "Order No."
    rand.set(str(random.randint(1, 100)))
    cof = _qty(Fries)
    colfries = _qty(Largefries)
    cob = _qty(Burger)
    cofi = _qty(Filet)
    cochee = _qty(Cheese_burger)
    codr = _qty(Drinks)
    # Unit prices match the PRICE window and the bill() breakdown.
    costoffries = cof * 25
    costoflargefries = colfries * 40
    costofburger = cob * 35
    costoffilet = cofi * 50
    # Bug fix: cheese burger was billed at 50 here while the price list
    # window and bill() both use 30.
    costofcheeseburger = cochee * 30
    costofdrinks = codr * 35
    subtotal = (costoffries + costoflargefries + costofburger
                + costoffilet + costofcheeseburger + costofdrinks)
    PayTax = subtotal * 0.33      # 33% tax
    Ser_Charge = subtotal / 99    # service charge
    Service_Charge.set('%.2f' % Ser_Charge)
    cost.set('%.2f' % subtotal)
    Tax.set('%.2f' % PayTax)
    Subtotal.set('%.2f' % subtotal)
    Total.set('%.2f' % (PayTax + subtotal + Ser_Charge))
def qexit():
    """Close the main window and end the application."""
    root.destroy()
def reset():
    """Clear the order number, every quantity entry and all computed totals."""
    for entry_var in (rand, Fries, Largefries, Burger, Filet, Subtotal,
                      Total, Service_Charge, Drinks, Tax, cost, Cheese_burger):
        entry_var.set("")
#---------------------------------------------------------------------------------------
# Tk variables backing the quantity entries (read by Ref/bill) and the
# computed-total labels (written by Ref).
rand = StringVar()
Fries = StringVar()
Largefries = StringVar()
Burger = StringVar()
Filet = StringVar()
Subtotal = StringVar()
Total = StringVar()
Service_Charge = StringVar()
Drinks = StringVar()
Tax = StringVar()
cost = StringVar()
Cheese_burger = StringVar()
# Order-number row
lblreference = Label(f1, font=( 'aria' ,16, 'bold' ),text="Order No.",fg="steel blue",bd=10)
lblreference.grid(row=0,column=0)
lblreference = Label(f1,font=('ariel' ,16,'bold'), textvariable=rand ,width=12, bd=4,fg="black")
lblreference.grid(row=0,column=1)
# One label + entry row per menu item (columns 0/1)
lblfries = Label(f1, font=( 'aria' ,16, 'bold' ),text="Fries Meal",fg="steel blue",bd=10)
lblfries.grid(row=1,column=0)
txtfries = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Fries ,width=12, bd=4,bg="powder blue" ,justify='right')
txtfries.grid(row=1,column=1)
lblLargefries = Label(f1, font=( 'aria' ,16, 'bold' ),text="Lunch Meal",fg="steel blue",bd=10)
lblLargefries.grid(row=2,column=0)
txtLargefries = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Largefries ,width=12, bd=4,bg="powder blue" ,justify='right')
txtLargefries.grid(row=2,column=1)
lblburger = Label(f1, font=( 'aria' ,16, 'bold' ),text="Burger Meal",fg="steel blue",bd=10)
lblburger.grid(row=3,column=0)
txtburger = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Burger ,width=12, bd=4,bg="Powder blue" ,justify='right')
txtburger.grid(row=3,column=1)
lblFilet = Label(f1, font=( 'aria' ,16, 'bold' ),text="Pizza Meal",fg="steel blue",bd=10)
lblFilet.grid(row=4,column=0)
txtFilet = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Filet ,width=12, bd=4,bg="powder blue" ,justify='right')
txtFilet.grid(row=4,column=1)
lblCheese_burger = Label(f1, font=( 'aria' ,16, 'bold' ),text="Cheese burger",fg="steel blue",bd=10)
lblCheese_burger.grid(row=5,column=0)
txtCheese_burger = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Cheese_burger ,width=12, bd=4,bg="powder blue" ,justify='right')
txtCheese_burger.grid(row=5,column=1)
lblDrinks = Label(f1, font=( 'aria' ,16, 'bold' ),text="Drinks",fg="steel blue",bd=10)
lblDrinks.grid(row=6,column=0)
txtDrinks = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Drinks ,width=12, bd=4,bg="powder blue" ,justify='right')
txtDrinks.grid(row=6,column=1)
#-----------------------------------------------------------------------------------------------------------------------------
# Read-only result labels (columns 3/4), filled in by Ref()
lblcost = Label(f1, font=( 'aria' ,16, 'bold' ),text="Cost",fg="steel blue",bd=10)
lblcost.grid(row=1,column=3)
lblcost = Label(f1,font=('ariel' ,16,'bold'), textvariable=cost , bd=6,fg="black")
lblcost.grid(row=1,column=4)
lblService_Charge = Label(f1, font=( 'aria' ,16, 'bold' ),text="Service Charge",fg="steel blue",bd=10)
lblService_Charge.grid(row=2,column=3)
lblreference = Label(f1,font=('ariel' ,16,'bold'), textvariable=Service_Charge , bd=6,fg="black")
lblreference.grid(row=2,column=4)
lblTax = Label(f1, font=( 'aria' ,16, 'bold' ),text="Tax",fg="steel blue",bd=10)
lblTax.grid(row=3,column=3)
lblreference = Label(f1,font=('ariel' ,16,'bold'), textvariable=Tax , bd=6,fg="black")
lblreference.grid(row=3,column=4)
lblSubtotal = Label(f1, font=( 'aria' ,16, 'bold' ),text="Subtotal",fg="steel blue",bd=10)
lblSubtotal.grid(row=4,column=3)
lblreference = Label(f1,font=('ariel' ,16,'bold'), textvariable=Subtotal , bd=6,fg="black")
lblreference.grid(row=4,column=4)
lblTotal = Label(f1, font=( 'aria' ,16, 'bold' ),text="Total",fg="steel blue",bd=10)
lblTotal.grid(row=5,column=3)
lblreference = Label(f1,font=('ariel' ,16,'bold'), textvariable=Total , bd=6,fg="black")
lblreference.grid(row=5,column=4)
#-----------------------------------------BUTTONS------------------------------------------
btnTotal=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="TOTAL", bg="powder blue",command=Ref)
btnTotal.grid(row=10, column=1)
btnreset=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="RESET", bg="powder blue",command=reset)
btnreset.grid(row=10, column=3)
btnexit=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="EXIT", bg="powder blue",command=qexit)
btnexit.grid(row=10, column=4)
def price():
    """Open a window listing every menu item next to its price."""
    roo = Tk()
    roo.geometry("600x220+0+0")
    roo.title("Price List")
    label_font = ('aria', 15, 'bold')
    # column headers
    Label(roo, font=label_font, text="ITEM", fg="black", bd=5).grid(row=0, column=0)
    Label(roo, font=label_font, text="PRICE", fg="black").grid(row=0, column=3)
    # one row per menu item, same order and prices as the original list
    menu = [("Fries Meal", "25"),
            ("Lunch Meal", "40"),
            ("Burger Meal", "35"),
            ("Pizza Meal", "50"),
            ("Cheese Burger", "30"),
            ("Drinks", "35")]
    for row, (item, amount) in enumerate(menu, start=1):
        Label(roo, font=label_font, text=item, fg="steel blue").grid(row=row, column=0)
        Label(roo, font=label_font, text=amount, fg="steel blue").grid(row=row, column=3)
    roo.mainloop()
# Button that opens the price-list window
btnprice=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="PRICE", bg="powder blue",command=price)
btnprice.grid(row=10, column=0)
def bill():
    """Open a window showing an itemised bill for the current order.

    Reads the quantity StringVars and the totals computed by Ref().
    """
    def _qty(value):
        # Blank entries count as zero; float() accepts the same values
        # that Ref() does (including decimals).
        return float(value) if value else 0.0

    ro = Tk()
    ro.geometry("700x500")
    ro.title("Bill")
    item_font = ('aria', 15, 'bold')
    # order header
    Label(ro, font=item_font, text=" ***ORDER", fg="steel blue", bd=5).grid(row=0, column=2)
    Label(ro, font=item_font, text="NO. {} ***".format(rand.get()), fg="steel blue", bd=5).grid(row=0, column=3)
    # column headers
    Label(ro, font=item_font, text="ITEM", fg="black", bd=5).grid(row=1, column=0)
    Label(ro, font=item_font, text="QUANTITY", fg="black").grid(row=1, column=3)
    Label(ro, font=item_font, text="AMOUNT", fg="black").grid(row=1, column=5)
    # (item name, quantity string, unit price) — prices match the PRICE window.
    lines = [("Fries Meal", Fries.get(), 25),
             ("Lunch Meal", Largefries.get(), 40),
             ("Burger Meal", Burger.get(), 35),
             ("Pizza Meal", Filet.get(), 50),
             ("Cheeseburger", Cheese_burger.get(), 30),
             ("Drinks", Drinks.get(), 35)]
    for row, (item, qty, unit_price) in enumerate(lines, start=2):
        Label(ro, font=item_font, text=item, fg="black").grid(row=row, column=0)
        Label(ro, font=item_font, text="{}".format(qty), fg="black").grid(row=row, column=3)
        # Bug fix: int(qty) crashed on blank fields and on the decimal
        # quantities Ref() accepts; parse leniently instead.  '%g' keeps
        # whole numbers rendered without a decimal point.
        Label(ro, font=item_font, text='%g' % (_qty(qty) * unit_price), fg="black").grid(row=row, column=5)
    # totals (as computed by Ref())
    Label(ro, font=item_font, text="-----------------", fg="black").grid(row=8, column=5)
    Label(ro, font=item_font, text="Cost: Rs {}".format(cost.get()), fg="black").grid(row=9, column=5)
    Label(ro, font=item_font, text="Service Charge: Rs {}".format(Service_Charge.get()), fg="black").grid(row=10, column=5)
    Label(ro, font=item_font, text="Tax: Rs {}".format(Tax.get()), fg="black").grid(row=11, column=5)
    Label(ro, font=item_font, text="Subtotal: Rs {}".format(Subtotal.get()), fg="black").grid(row=12, column=5)
    Label(ro, font=item_font, text="Total: Rs {}".format(Total.get()), fg="black").grid(row=13, column=5)
    Label(ro, font=('italic', 15, 'bold'), text="THANK YOU!", fg="black").grid(row=16, column=3)
    ro.mainloop()
# Button that opens the itemised-bill window, then start the main loop
btnBill=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="BILL", bg="powder blue",command=bill)
btnBill.grid(row=10, column=2)
root.mainloop()
|
# ------------------------------------------------------------------------------
# pose.pytorch
# Copyright (c) 2018-present Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import pprint
import cv2
from PIL import Image
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import _init_paths
from config import cfg
from config import update_config
from core.loss import JointsMSELoss
from core.function import validate
from core.inference import get_final_preds_wo_c_s
from utils.utils import create_logger
from utils.vis import save_demo_images
import dataset
import models
def parse_args():
    """Parse command-line arguments for the keypoint-network demo."""
    parser = argparse.ArgumentParser(description="Train keypoints network")
    # general
    parser.add_argument("--cfg", help="experiment configure file name", required=True, type=str)
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    # optional directory / file overrides, all plain strings
    for flag, description in (("--modelDir", "model directory"),
                              ("--logDir", "log directory"),
                              ("--dataDir", "data directory"),
                              ("--imFile", "input image file")):
        parser.add_argument(flag, help=description, type=str, default="")
    return parser.parse_args()
def main():
    """Webcam pose-estimation demo: build the model from the config file,
    then loop grabbing frames, predicting keypoints and saving images."""
    args = parse_args()
    update_config(cfg, args)
    logger, final_output_dir, tb_log_dir = create_logger(cfg, args.cfg, "valid")
    logger.info(pprint.pformat(args))
    logger.info(cfg)
    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
    # Instantiate the network class named in the config.
    model = eval("models." + cfg.MODEL.NAME + ".get_pose_net")(cfg, is_train=False)
    if cfg.TEST.MODEL_FILE:
        logger.info("=> loading model from {}".format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        # fall back to the final checkpoint in the experiment output dir
        model_state_file = os.path.join(final_output_dir, "final_state.pth")
        logger.info("=> loading model from {}".format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))
    # model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    # inference runs on CPU
    device = torch.device("cpu")
    model = model.to(device)
    # define loss function (criterion) and optimizer
    # NOTE(review): .cuda() requires a GPU although the model was just moved
    # to CPU, and `criterion` is never used below — confirm it can be removed.
    criterion = JointsMSELoss(use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT).cuda()
    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    test_transform = transforms.Compose(
        [
            transforms.ToTensor(),
            normalize,
        ]
    )
    basewidth = 384
    # default webcam, requested at 800x600
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
    print(
        cap.get(cv2.CAP_PROP_FRAME_WIDTH),
        cap.get(cv2.CAP_PROP_FRAME_HEIGHT),
        cap.get(cv2.CAP_PROP_FPS),
    )
    while True:
        _, frame = cap.read()
        img = frame
        # im_name = args.imFile
        # img = Image.open(im_name).convert("RGB")
        # print("Image original shape = ", img.size)
        print(img.shape)
        # NOTE(review): wpercent/hsize are computed but unused — the resize
        # below uses a fixed 384x288 instead.
        wpercent = float(basewidth) / float(img.shape[1])
        hsize = float(img.shape[0]) * float(wpercent)
        # img = img.resize((basewidth, hsize), Image.ANTIALIAS)
        img = cv2.resize(img, (384, 288))
        img = test_transform(img)
        # add a batch dimension
        img = torch.unsqueeze(img, 0)
        # print("Input image final shape = ", img.shape)
        print(img.shape[-1])
        # evaluate on validation set
        with torch.no_grad():
            outputs = model(img)
        # ratio between input resolution and model output resolution
        image_to_pred_scale = img.shape[-1] / outputs.shape[-1]
        # print("Predicted heatmap size = ", outputs.shape)
        # print(outputs)
        # print("Image to prediction scale = ", image_to_pred_scale)
        if isinstance(outputs, list):
            output = outputs[-1]
        else:
            output = outputs
        preds, maxvals = get_final_preds_wo_c_s(output.clone().cpu().numpy())
        # Display the result image (currently disabled)
        # dispFps.disp(vis_result)
        # cv2.imshow("frame", vis_result)
        # waitKey(): wait up to 100 ms for a key press
        keyboard_input = cv2.waitKey(100)  # on 64-bit machines "& 0xFF" may be needed
        # NOTE(review): no window named "frame" is ever created (imshow is
        # commented out), so this property read likely returns an error value.
        prop_val = cv2.getWindowProperty("frame", cv2.WND_PROP_ASPECT_RATIO)  # detect a closed window
        # exit when ESC or q/Q is pressed
        if keyboard_input in (27, ord("q"), ord("Q")):
            break
        save_demo_images(
            batch_image=img,
            batch_joints=preds * image_to_pred_scale,
            batch_joints_vis=maxvals,
            # file_name="out.png",
        )
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
import argparse
from pprint import pprint
def get_args():
    '''Parse and return the arguments passed on the command line.

    Returns:
        tuple: (sam file path, output directory, minimum match length (int),
        minimum identity fraction (float)).
    '''
    parser = argparse.ArgumentParser(
        prog='Alignment to specie',
        description='From a sam file, returns specie names')
    parser.add_argument('mapsam', help="path to sam file")
    parser.add_argument(
        '-outdir',
        default="./",
        help="path to (existing) output directory")
    # Bug fix: without type=, values given on the command line arrived as
    # str, breaking the later numeric comparisons (seqlen > minlen,
    # identity >= idpercent).
    parser.add_argument(
        '-minlen',
        type=int,
        default=28,
        help="Minimum length of match to report")
    parser.add_argument(
        '-idpercent',
        type=float,
        default=0.99,
        help="Minimum identity percentage of match to report")
    args = parser.parse_args()
    mysam = args.mapsam
    myoutdir = args.outdir
    minlen = args.minlen
    idpercent = args.idpercent
    return(mysam, myoutdir, minlen, idpercent)
def get_basename(samfile_name):
    """Return the file-name portion of *samfile_name*, truncated at the
    first dot (e.g. ``dir/sample.best.sam`` -> ``sample``)."""
    filename = samfile_name.rsplit("/", 1)[-1]
    return filename.split(".", 1)[0]
if __name__ == "__main__":
mysam, myoutdir, minlen, idpercent = get_args()
matchdict = {}
readdict = {}
basename = get_basename(mysam)
with open(mysam, "r") as sam:
with open(basename + ".best.aligned.fa", "w") as fw:
for line in sam:
linestrip = line.rstrip()
linesplit = linestrip.split("\t")
for field in linesplit:
if ("XM:i:") in field:
mismatch = field.split(":")[2] # number of mismatches
mismatch = int(mismatch)
seq = linesplit[9]
seqlen = len(seq) # length of aligned read
identity = (seqlen - mismatch) / seqlen
dbmatch = linesplit[2] # match in database, NCBI id
readname = linesplit[0]
if identity >= idpercent and seqlen > minlen:
if readname not in readdict.keys():
readdict[readname] = [seq]
fw.write(">" + readname + "\n" + seq + "\n")
pprint(readdict)
|
"""
Minmal czi file example
"""
import czifile as zis
import numpy as np
import napari
# Path to the demo CZI image (T=10 time points, Z=15 slices, 2 channels)
file_name = 'data-njs/zeiss/CellDivision_T=10_Z=15_CH=2_DCV_small.czi'
czi = zis.CziFile(file_name)
# read the whole image into an in-memory array
cziarray = czi.asarray()
#cziarray = np.squeeze(cziarray)
print('czi file shape', cziarray.shape)
# Display the stack in napari with axis 2 treated as the channel axis.
# NOTE(review): scale=[1, 1, 3, 1, 1] presumably stretches the z axis 3x —
# confirm against the array shape printed above.
with napari.gui_qt():
    napari.view_image(cziarray, channel_axis=2, scale=[1, 1, 3, 1, 1])
|
"""
Rushing Statistics Model
"""
from django.db.models import \
BooleanField, DecimalField, ForeignKey, Model, PROTECT
class RushingStats(Model):
    """
    Contains the rushing statistics for a player
    """
    # Player these stats belong to; PROTECT blocks deleting a player that
    # still has stats rows.
    player = ForeignKey(
        'Player',
        on_delete=PROTECT,
        null=False
    )
    # rushing attempts per game
    att_per_game = DecimalField(
        max_digits=5,
        decimal_places=1,
        null=True
    )
    # total rushing attempts
    att_total = DecimalField(
        max_digits=7,
        decimal_places=0,
        null=True
    )
    # total rushing yards
    yds_total = DecimalField(
        max_digits=7,
        decimal_places=0,
        null=True
    )
    # average yards per attempt
    yds_avg_per_att = DecimalField(
        max_digits=5,
        decimal_places=1,
        null=True
    )
    # rushing yards per game
    yds_per_game = DecimalField(
        max_digits=7,
        decimal_places=1,
        null=True
    )
    # total rushing touchdowns
    td_total = DecimalField(
        max_digits=5,
        decimal_places=0,
        null=True
    )
    # longest rush
    rush_max = DecimalField(
        max_digits=5,
        decimal_places=0,
        null=True
    )
    # whether the longest rush was a touchdown
    # NOTE(review): no null= or default= given, unlike every other field —
    # confirm a value is always supplied when rows are created.
    rush_max_td = BooleanField(
    )
    # rushing first downs
    rush_1st = DecimalField(
        max_digits=5,
        decimal_places=0,
        null=True
    )
    # rushing first-down percentage
    rush_1st_pct = DecimalField(
        max_digits=5,
        decimal_places=1,
        null=True
    )
    # rushes of 20+ yards
    rush_20_yds = DecimalField(
        max_digits=7,
        decimal_places=0,
        null=True
    )
    # rushes of 40+ yards
    rush_40_yds = DecimalField(
        max_digits=7,
        decimal_places=0,
        null=True
    )
    # total fumbles
    fumbles_total = DecimalField(
        max_digits=7,
        decimal_places=0,
        null=True
    )
    class Meta:
        # explicit table name instead of the app_model default
        db_table = "rushing_stats"
|
import baza1 as baza
import sqlite3
# Open (creating tables if needed) the SQLite database with the flight data.
conn = sqlite3.connect('letalski_prevozi.db')
baza.ustvari_bazo_ce_ne_obstaja(conn)
# Enforce foreign-key constraints (disabled by default in SQLite).
conn.execute('PRAGMA foreign_keys = ON')
def commit(fun):
    """
    Decorator that creates a cursor, passes it to the decorated function,
    and then commits the changes to the database.
    The original (non-committing) function stays available under the
    ``nocommit`` attribute.
    """
    def funkcija(*largs, **kwargs):
        ret = fun(conn.cursor(), *largs, **kwargs)
        conn.commit()
        return ret
    # copy the wrapped function's metadata by hand
    funkcija.__doc__ = fun.__doc__
    funkcija.__name__ = fun.__name__
    funkcija.__qualname__ = fun.__qualname__
    # expose the undecorated function as <name>.nocommit
    fun.__qualname__ += '.nocommit'
    funkcija.nocommit = fun
    return funkcija
def id_karte(ime):
    """
    Return the ID of the ticket with the given name, if such a ticket
    exists; otherwise return False.
    """
    # NOTE(review): fetchone() yields the whole row tuple, so callers get
    # e.g. (5,) rather than 5 — confirm that is intended.
    vrstica = conn.execute("SELECT number FROM karta WHERE karta.ime = ?",[ime]).fetchone()
    if vrstica is not None:
        return vrstica
    return False
def ime_potnika(let):
    """
    Return the passenger name for the given flight on the ticket.
    """
    # join() flattens the single-column row tuple into a plain string;
    # NOTE(review): raises TypeError if no row matches (fetchone() -> None).
    return ''.join(conn.execute("SELECT ime FROM karta WHERE let = ?",[let]).fetchone())
def vsi_prevozi():
    """
    Return a row from the ``linije`` table.
    """
    # NOTE(review): the name says "all", the original docstring said
    # "first 10", but fetchone() returns a single row — confirm which of
    # the three is the intended behaviour.
    return conn.execute("""SELECT * FROM linije """).fetchone()
def prvih_deset():
    """
    Return the first ten upcoming flights, ordered by departure time.

    Each row: (id, odhod, prihod, odhod_dan, cas_letenja,
    odhod_letalisce, prihod_letalisce).
    """
    # Bug fix: the query used LIMIT 50 although the function is named (and
    # was documented as) "first ten".
    return conn.execute("""SELECT id, odhod, prihod, odhod_dan, cas_letenja, odhod_letalisce, prihod_letalisce
        FROM let JOIN linije ON let.stevilka_leta = linije.koda
        WHERE odhod > date('now')
        ORDER BY odhod
        LIMIT 10;
        """).fetchall()
def poisci(id):
    """
    Return all data about a flight given its ID, or None if there is no
    such flight.
    """
    # Bug fixes: both JOIN conditions referenced the unaliased table name
    # ``letalisce`` (invalid once the table is aliased), and the arrival
    # airport was joined on the departure code instead of prihod_letalisce.
    sql = """SELECT id, odhod, prihod, odhod_dan, cas_letenja, odhod_letalisce, prihod_letalisce, letalisce_odhod.ime
        FROM let
        JOIN linije ON let.stevilka_leta = linije.koda
        JOIN letalisce AS letalisce_odhod ON letalisce_odhod.koda_letalisca = odhod_letalisce
        JOIN letalisce AS letalisce_prihod ON letalisce_prihod.koda_letalisca = prihod_letalisce
        WHERE id = ?"""
    return conn.execute(sql, [id]).fetchone()
def ime_letalisca(niz):
    """
    Return the airport name for the given airport code.
    """
    # NOTE(review): this selects the code column, not ``ime``, so it cannot
    # return a name; also the table/column are spelled ``letalisca`` /
    # ``koda_letalisce`` here but ``letalisce`` / ``koda_letalisca`` in
    # poisci() — confirm the actual schema and fix the query accordingly.
    sql ="""SELECT koda_letalisce FROM letalisca WHERE koda_letalisce LIKE ?"""
    return conn.execute(sql, [niz]).fetchone()
|
#Copyright [2017] [Mauro Riva <lemariva@mail.com> <lemariva.com>]
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
import time
import ujson
import requests
import logging
from network import WLAN
# https://developers.google.com/maps/documentation/geolocation/intro
# {
# "homeMobileCountryCode": 310,
# "homeMobileNetworkCode": 410,
# "radioType": "gsm",
# "carrier": "Vodafone",
# "considerIp": "true",
# "wifiAccessPoints": [
# // See the WiFi Access Point Objects section below.
# ]
# }
# // WiFi Access Point Objects
# {
# "macAddress": "00:25:9c:cf:1c:ac",
# "signalStrength": -43,
# "age": 0,
# "channel": 11,
# "signalToNoiseRatio": 0
# }
# http://mcc-mnc.com/
logger = logging.getLogger(__name__)
class geolocate():
    """Locate the device with the Google Geolocation API from nearby WiFi
    access points scanned by the WLAN interface."""

    def __init__(self, google_api_key, my_ssid, wlan_check_interval = 1, mcc=262, mnc=11):
        self.url = "https://www.googleapis.com/geolocation/v1/geolocate?key=" + google_api_key
        # wlan configuration and information
        self.wlan_scans = WLAN(mode=WLAN.STA)
        self.wlan_timer = 0                             # time of the last scan
        self.wlan_check_interval = wlan_check_interval  # seconds before a re-scan
        self.wlan_number = 0                            # APs found by the last scan
        self.my_ssid = my_ssid                          # own AP, excluded from requests
        self.nets = None                                # last scan results
        self.rjson = None                               # last API response (dict)
        self.mcc = mcc                                  # mobile country code (see http://mcc-mnc.com/)
        self.mnc = mnc                                  # mobile network code

    def prettify(self, mac_binary):
        """Format a binary MAC address as colon-separated lowercase hex."""
        return ':'.join('%02x' % (b) for b in mac_binary)

    def scan_wlan(self):
        """Scan for WiFi networks and record when the scan happened."""
        logger.info(" wlan trying to scan")
        self.nets = self.wlan_scans.scan()
        self.wlan_timer = time.time()
        self.wlan_number = len(self.nets)
        logger.info(" wlan scan ready")

    def wlan_nodes(self):
        """Return the number of access points found by the last scan."""
        return self.wlan_number

    def get_location(self):
        """Query the geolocation API.

        Returns a (valid, response_json) pair; ``valid`` is False when the
        response contains no "location" key.
        """
        valid = True
        # re-scan if we never scanned or the last scan is stale
        if (self.nets is None) or (time.time() - self.wlan_timer >= self.wlan_check_interval):
            self.scan_wlan()
        # build the JSON request body
        req = {}
        req["homeMobileCountryCode"] = self.mcc
        req["homeMobileNetworkCode"] = self.mnc
        req["radioType"] = "gsm"
        req["carrier"] = "O2"
        req["considerIp"] = "false"
        wlan_nodes = []
        for net in self.nets:
            if net.ssid != self.my_ssid:
                wlan_node = {}
                wlan_node["macAddress"] = str(self.prettify(net.bssid))
                wlan_node["signalStrength"] = net.rssi
                wlan_node["channel"] = net.channel
                wlan_nodes.append(wlan_node)
        req["wifiAccessPoints"] = wlan_nodes
        try:
            # Bug fix: the request previously passed json=ujson.dumps(req),
            # which serialises the body twice (the json= parameter encodes
            # the object itself).
            r = requests.post(self.url, json=req)
            self.rjson = r.json()
        except Exception as error:
            logger.error(str(error))
            raise
        if self.rjson.get("location") is None:
            print(self.rjson)
            valid = False
        return valid, self.rjson

    def get_location_string(self):
        """Return "lat,lng,accuracy\\n" for the last response, or None when
        the last response had no location."""
        location_string = None
        if self.rjson.get("location") is not None:
            location_string = str(self.rjson['location']['lat']) + "," + str(self.rjson['location']['lng']) + "," + str(self.rjson['accuracy']) + "\n"
        return location_string
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + run_control={"frozen": false, "read_only": false}
from __code.ui_builder import UiBuilder
# build the Qt .ui file for the radial-profile interface
o_builder = UiBuilder(ui_name = 'ui_radial_profile.ui')
from __code import system
from __code.fileselector import FileSelection
from __code.radial_profile import RadialProfile, SelectRadialParameters
# system.System.select_working_dir()
# from __code.__all import custom_style
# custom_style.style()
# + run_control={"frozen": false, "read_only": false}
# %gui qt
# + run_control={"frozen": false, "read_only": false}
import glob
import os
# Collect the CT tiff stack from the (hard-coded) experiment folder.
file_dir = '/Volumes/my_book_thunderbolt_duo/IPTS/IPTS-19621-CLOCK/CT/'
list_files = glob.glob(file_dir + '*.tiff')
o_selection = FileSelection()
o_selection.load_files(list_files)
# open the radial-parameter selection UI on the loaded sample data
o_select = SelectRadialParameters(working_dir=file_dir,
                                  data_dict=o_selection.data_dict['sample'])
o_select.show()
# -
|
"""
This script auto-generates the
"""
import yaml
import os
import re
# Markdown header of the generated rules page; rule sections are appended below.
final_md = (
    """\
# Per rule explanation
This is an automatically generated list of all supported rules, their docstrings, and command. At the start of each \
workflow run a list is printed of which rules will be run. And while the workflow is running it prints which rules are \
being started and finished. This page is here to give an explanation to the user about what each rule does, and for \
developers to find what is, and isn't yet supported.
"""
)
# directory containing the Snakemake rule files to scan
path = "seq2science/rules/"
def get_dirty_docstrings(string):
    """Map each rule name found in *string* to its raw (still indented)
    docstring, taken from the first triple-quoted block after the rule."""
    pattern = re.compile("rule (.*):[\s\S]*?\"\"\"([\s\S]*?)\"\"\"", re.MULTILINE)
    return {found.group(1): found.group(2) for found in pattern.finditer(string)}
def cleanup(dirty):
    """Strip leading indentation from each raw docstring/command.

    The indent level is measured on the first content line; a second pass
    with four fewer spaces dedents continuation lines as well.
    """
    def _dedent(text):
        first_content = text.split("\n")[1]
        indent = len(first_content) - len(first_content.lstrip())
        text = text.replace(" " * indent, "").replace(" " * (indent - 4), "")
        return text.strip("\n")

    return {rule: _dedent(body) for rule, body in dirty.items()}
def get_dirty_shell(string):
    """Map each rule name found in *string* to its raw shell command, taken
    from the triple-quoted block after the rule's ``shell:`` keyword."""
    pattern = re.compile("rule (.*):[\s\S]*?shell:[\s\S]*?\"\"\"[\s\S]([\s\S]*?)\"\"\"", re.MULTILINE)
    return {found.group(1): found.group(2) for found in pattern.finditer(string)}
# docstring / shell command for every rule across all rule files
all_rules_doc = {}
all_rules_shell = {}
for rules_file in os.listdir(path):
    with open(path + rules_file, 'r') as file:
        text = file.read()
    shell_cmd = cleanup(get_dirty_shell(text))
    all_rules_shell.update(shell_cmd)
    docstrings = cleanup(get_dirty_docstrings(text))
    all_rules_doc.update(docstrings)
# Emit one markdown section per rule, alphabetically, with its shell
# command (when present) in a fenced code block.
for rule in sorted(all_rules_doc.keys()):
    docstring = all_rules_doc[rule]
    final_md += f"#### {rule}\n"
    final_md += f"{docstring}\n"
    if rule in all_rules_shell:
        final_md += "```\n"
        final_md += f"{all_rules_shell[rule]}\n"
        final_md += "```\n"
    final_md += f"\n"
with open("docs/content/all_rules.md", "w") as text_file:
    text_file.write(final_md)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
"""
This module contains functions to convert `DOM` relations to `path-like` lists
of elements defined by tag names and parameters.
"""
# Imports =====================================================================
# Functions & objects =========================================================
def el_to_path_vector(el):
    """
    Convert `el` to a vector of its foregoing elements.

    Attr:
        el (obj): Double-linked HTMLElement instance.

    Returns:
        list: HTMLElements forming the path from the root down to `el`.
    """
    # climb from `el` up to the root, collecting every element on the way
    chain = [el]
    while el.parent:
        el = el.parent
        chain.append(el)
    # chain is leaf-to-root; callers expect root-to-leaf
    chain.reverse()
    return chain
def common_vector_root(vec1, vec2):
    """
    Return the common root (longest common prefix) of the two vectors.

    Args:
        vec1 (list/tuple): First vector.
        vec2 (list/tuple): Second vector.

    Usage example::
        >>> common_vector_root([1, 2, 3, 4, 5], [1, 2, 8, 9, 0])
        [1, 2]

    Returns:
        list: Common part of the two vectors, or a blank list.
    """
    prefix = []
    for left, right in zip(vec1, vec2):
        if left != right:
            break
        prefix.append(left)
    return prefix
def find_common_root(elements):
    """
    Find the root which is common to all `elements`.

    Args:
        elements (list): List of double-linked HTMLElement objects.

    Returns:
        list: Vector of HTMLElements containing the path to the common root.

    Raises:
        UserWarning: If `elements` is empty or the elements share no root.
    """
    if not elements:
        raise UserWarning("Can't find common root - no elements suplied.")
    # Bug fix: the original called elements.pop(), mutating the caller's
    # list; index from the end instead.
    root_path = el_to_path_vector(elements[-1])
    for el in elements[:-1]:
        el_path = el_to_path_vector(el)
        root_path = common_vector_root(root_path, el_path)
        if not root_path:
            raise UserWarning(
                "Vectors without common root:\n%s" % str(el_path)
            )
    return root_path
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
traverse_obj,
)
class MixchIE(InfoExtractor):
    """Extractor for live streams on mixch.tv user pages."""
    IE_NAME = 'mixch'
    # allow omitting last /live in the URL, though it's likely uncommon
    _VALID_URL = r'https?://(?:www\.)?mixch\.tv/u/(?P<id>\d+)'
    # Bug fix: the tests were stored under ``TESTS``; the extractor test
    # framework only discovers the ``_TESTS`` attribute.
    _TESTS = [{
        'url': 'https://mixch.tv/u/16137876/live',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Download the live page, parse its embedded JS state and return
        an info dict for the live HLS stream."""
        video_id = self._match_id(url)
        url = 'https://mixch.tv/u/%s/live' % video_id
        webpage = self._download_webpage(url, video_id)
        initial_js_state = self._parse_json(self._search_regex(
            r'(?m)^\s*window\.__INITIAL_JS_STATE__\s*=\s*(\{.+?\});\s*$', webpage, 'initial JS state'), video_id)
        # liveInfo is absent once the broadcast has finished
        if not initial_js_state.get('liveInfo'):
            raise ExtractorError('Live has ended.', expected=True)
        title = traverse_obj(initial_js_state, ('liveInfo', 'title'))
        comment_count = traverse_obj(initial_js_state, ('liveInfo', 'comments'))
        view_count = traverse_obj(initial_js_state, ('liveInfo', 'visitor'))
        timestamp = traverse_obj(initial_js_state, ('liveInfo', 'created'))
        uploader = traverse_obj(initial_js_state, ('broadcasterInfo', 'name'))
        # the service does not provide alternative resolutions
        hls_url = traverse_obj(initial_js_state, ('liveInfo', 'hls')) or 'https://d1hd0ww6piyb43.cloudfront.net/hls/torte_%s.m3u8' % video_id
        return {
            'id': video_id,
            'title': title,
            'comment_count': comment_count,
            'view_count': view_count,
            'timestamp': timestamp,
            'uploader': uploader,
            'uploader_id': video_id,
            'formats': [{
                'format_id': 'hls',
                'url': hls_url,
                'protocol': 'm3u8',
                'ext': 'mp4',
            }],
            'is_live': True,
            'webpage_url': url,
        }
|
# coding: utf8
from aenum import Enum
__author__ = "Timothy Heys"
__email__ = "theys@kayak.com"
class Arithmetic(Enum):
    """SQL arithmetic operator symbols."""
    add = '+'
    sub = '-'
    mul = '*'
    div = '/'
class Comparator(Enum):
    """Abstract base for comparison-operator enums (see Equality/Matching/Boolean)."""
    pass
class Equality(Comparator):
    """SQL (in)equality comparison operator symbols."""
    eq = '='
    ne = '<>'
    gt = '>'
    gte = '>='
    lt = '<'
    lte = '<='
class Matching(Comparator):
    """SQL pattern-matching operators (values carry their surrounding spaces)."""
    like = ' LIKE '
    regex = ' REGEX '
    bin_regex = ' REGEX BINARY '
class Boolean(Comparator):
    """SQL boolean connectives (trailing underscore avoids Python keywords)."""
    and_ = 'AND'
    or_ = 'OR'
    xor_ = 'XOR'
class Order(Enum):
    """ORDER BY sort directions."""
    asc = 'ASC'
    desc = 'DESC'
class JoinType(Enum):
    """JOIN keywords; `inner` is empty because plain JOIN is the inner join."""
    inner = ''
    left = 'LEFT'
    right = 'RIGHT'
    outer = 'OUTER'
class UnionType(Enum):
    """UNION variants; `distinct` is empty because plain UNION de-duplicates."""
    distinct = ''
    all = ' ALL'
class DatePart(Enum):
    """Date/time component names usable in SQL date functions."""
    year = 'YEAR'
    quarter = 'QUARTER'
    month = 'MONTH'
    week = 'WEEK'
    day = 'DAY'
    hour = 'HOUR'
    minute = 'MINUTE'
    second = 'SECOND'
    microsecond = 'MICROSECOND'
class SqlTypes(Enum):
    """Type names usable in SQL CAST/CONVERT expressions."""
    SIGNED = 'SIGNED'
    UNSIGNED = 'UNSIGNED'
    utf8 = 'utf8'
    DATE = 'DATE'
    TIMESTAMP = 'TIMESTAMP'
    VARCHAR = 'VARCHAR'
|
import subprocess
import tempfile
import SimpleITK as sitk
import numpy as np
from disptools import *
def jacobian_to_atrophy_map(image: sitk.Image) -> sitk.Image:
    r""" Convert a Jacobian map to a atrophy map.

    The atrophy rate :math:`a` is defined as the pointwise percentage of
    volume loss, so :math:`a = -(J-1)` (where :math:`J` is the Jacobian).

    Parameters
    ----------
    image : sitk.Image
        Input image.

    Returns
    -------
    sitk.Image
        Corresponding atrophy map.
    """
    # negate the pointwise volume change J - 1
    volume_change = image - 1.0
    return -volume_change
def mask_to_simulatrophy_mask(
        image: sitk.Image,
        radius: int = None,
        kernel: int = sitk.sitkBall,
        ) -> sitk.Image:
    r""" Convert a binary mask to a Simul\@atrophy mask.

    The mask used by Simul\@atrophy has five labels:
     - 0: skull
     - 1: cerebro-spinal fluid (CSF)
     - 2: gray matter
     - 3: white matter
     - 4: falx cerebri
    This function takes as input a binary mask, and returns another mask
    in the Simul\@atrophy format, where the ROI of the original mask is
    mapped to white matter, a surrounding region of CSF is created
    around it, and the remaining is set to skull.

    Parameters
    ----------
    image : sitk.Image
        Input binary mask.
    radius : int
        Radius for the dilation, determines the amount of CSF
        surrounding the ROI. If `None`, all the volume outside the ROI
        is set to CSF.
    kernel : int
        Kernel used for the dilation, among the values in
        `itk::simple::KernelEnum`_.

    .. _itk::simple::KernelEnum: https://itk.org/SimpleITKDoxygen/html/namespaceitk_1_1simple.html#a38998f2c7b469b1ad8e337a0c6c0697b

    Returns
    -------
    sitk.Image
        A Simul\@atrophy mask constructed from the input mask.
    """
    # binarise the input so the arithmetic below yields the label values
    binary = image > 0

    # CSF layer: either a dilation of the ROI, or (radius=None) everywhere
    if radius is None:
        csf = 1
    else:
        csf = sitk.BinaryDilate(binary, radius, kernel)

    # ROI voxels get 2 + csf, the CSF ring gets 1, the rest stays 0 (skull)
    return 2 * binary + csf
def run(
        jacobian : sitk.Image,
        mask : sitk.Image,
        scale : Tuple[float, float, float] = None,
        sigma : float = 2.0,
        lame : Tuple[float, float, float, float] = (1.0, 1.0, 1.0, 1.0),
        origin : Tuple[int, int, int] = None,
        size : Tuple[int, int, int] = None,
        executable : str = None,
        ) -> sitk.Image:
    r""" Wrapper around Simul\@atrophy.

    Use Simul\@atrophy [7]_ to generate a displacement that realises the
    volume changes prescribed by the input Jacobian map. Optionally, the
    function can operate on a downsampled version of the image.

    .. note::
        Requires a working installation of Simul\@atrophy.

    References
    ----------
    .. [7] Khanal, Bishesh, Nicholas Ayache, and Xavier Pennec.
           "Simulating Longitudinal Brain MRIs with Known Volume Changes
           and Realistic Variations in Image Intensity." Frontiers in
           neuroscience 11 (2017): 132.

    Parameters
    ----------
    jacobian : sitk.Image
        Jacobian map.
    mask : sitk.Image
        Binary mask marking the ROI whose Jacobian shall be matched.
    scale : Tuple[float, float, float]
        If not ``None``, operate on an image downsampled by dividing
        each size component by the given factors.
    sigma : float
        Amount of smoothing prior to resampling. Only relevant when
        ``scale`` is not ``None``.
    lame : Tuple[float, float, float, float]
        Lamé parameters, in the following order:
         * :math:`\mu` within the ROI
         * :math:`\mu` outside the ROI
         * :math:`\lambda` within the ROI
         * :math:`\lambda` outside the ROI
    origin : Tuple[int, int, int]
        If not `None` then only a region of the image is processed,
        defined by origin and size. Requires to specify ``size``.
    size : Tuple[int, int, int]
        If not `None` then only a region of the image is processed,
        defined by origin and size. Requires to specify ``origin``.
    executable : str
        Path to the Simul\@atrophy executable. If `None`, the executable
        is searched within the system path.

    Returns
    -------
    sitk.Image
        A displacement field realising the volume changes of the given
        Jacobian map.
    """
    if executable is None:
        executable = 'simul_atrophy'
    # All intermediate files live in a temporary directory that is removed
    # when the `with` block exits, whether or not the run succeeds.
    # NOTE(review): `os` is not imported in this file's visible header;
    # presumably it is re-exported by `from disptools import *` — confirm.
    with tempfile.TemporaryDirectory() as tmpdir:
        atrophy_file = os.path.join(tmpdir, 'atrophy_map.mha')
        mask_file = os.path.join(tmpdir, 'mask.mha')
        # Command line for the Simul@trophy executable; the input images
        # referenced here are written to disk further below.
        args = [
            executable,
            '-parameters', ','.join([str(p) for p in lame]),
            '-boundary_condition', 'dirichlet_at_walls',
            '--relax_ic_in_csf',
            '-atrophyFile', atrophy_file,
            '-maskFile', mask_file,
            '-imageFile', mask_file, # dummy parameter
            '--invert_field_to_warp',
            '-numOfTimeSteps', '1',
            '-resPath', os.path.join(tmpdir, ''),
            '-resultsFilenamesPrefix', 'out_',
        ]
        if origin is not None and size is not None:
            args += [
                '-domainRegion', ' '.join([str(x) for x in list(origin) + list(size)]),
            ]
        # Convert the inputs to the formats expected by Simul@trophy.
        mask_s = mask_to_simulatrophy_mask(mask, radius=None)
        atrophy = jacobian_to_atrophy_map(jacobian)
        if scale is not None:
            # Downsample both inputs; the mask uses nearest-neighbour to keep
            # labels intact, the atrophy map is smoothed then B-spline resampled.
            img_origin = jacobian.GetOrigin()
            img_size = [x // s for x, s in zip(jacobian.GetSize(), scale)]
            img_spacing = [x * s for x, s in zip(jacobian.GetSpacing(), scale)]
            mask_s = sitk.Resample(mask_s, img_size, sitk.Transform(), sitk.sitkNearestNeighbor, img_origin, img_spacing)
            atrophy = sitk.SmoothingRecursiveGaussian(atrophy, sigma)
            atrophy = sitk.Resample(atrophy, img_size, sitk.Transform(), sitk.sitkBSpline, img_origin, img_spacing)
        sitk.WriteImage(mask_s, mask_file)
        sitk.WriteImage(atrophy, atrophy_file)
        # Free the (potentially large) images before launching the solver.
        del mask_s
        del atrophy
        # Blocking call to the external solver; cwd is the scratch directory.
        proc = subprocess.Popen(args, cwd=tmpdir)
        proc.wait()
        if proc.returncode != 0:
            raise RuntimeError('Execution failed with status {}'.format(proc.returncode))
        # Output filename follows the '-resultsFilenamesPrefix' given above.
        displacement = sitk.ReadImage(os.path.join(tmpdir, 'out_T1vel.nii.gz'))
        if scale is not None:
            # Upsample the result back onto the original image grid.
            displacement = sitk.SmoothingRecursiveGaussian(displacement, sigma)
            displacement = sitk.Resample(displacement, jacobian, sitk.Transform(), sitk.sitkBSpline)
        return displacement
|
# -*- coding: utf-8 -*-
import datetime
import matplotlib.pyplot as plt
import pandas as pd
class EPEsoMonthlyVariable():
    """A class for a monthly variable recorded in an `EPEsoSimulationEnvironment` instance.

    .. note::

        An EPEsoMonthlyVariable instance is returned as the result of
        the `get_monthly_variable` or `get_monthly_variables` methods.
        It should not be instantiated directly.

    .. rubric:: Code Example

    .. code-block:: python

        >>> from eprun import EPEso
        >>> eso=EPEso(r'simulation_files\eplusout.eso')
        >>> env=eso.get_environment('RUN PERIOD 1')
        >>> mv=env.get_monthly_variables()[0]
        >>> print(type(mv))
        <class 'eprun.epeso_monthly_variable.EPEsoMonthlyVariable'>
        >>> print(mv.summary())
        48 - TEST 352A - Other Equipment Total Heating Energy (J)
        >>> print(mv.values[:5])
        (942796800.0, 851558400.0, 942796800.0, 912384000.0, 942796800.0)
    """

    def __repr__(self):
        """The repr string of the variable."""
        return 'EPEsoMonthlyVariable(report_code=%s)' % (self._report_code)

    @property
    def _data(self):
        """A dictionary with the variable data.

        :rtype: dict
        """
        return self._epesose._data['monthly_data'][self._report_code]

    @property
    def _monthly_periods(self):
        """The time periods object relating to the variable.

        :rtype: EPEsoMonthlyPeriods
        """
        # NOTE(review): the previous docstring said EPEsoDailyPeriods, but the
        # call below fetches the *monthly* periods object — confirm exact type.
        return self._epesose.get_monthly_periods()

    @property
    def _variable_dictionary(self):
        """A dictionary with the variable data dictionary data.

        :rtype: dict
        """
        return self._epesose._epeso._variable_dictionary[self._report_code]

    def get_dataframe(self):
        """Returns a pandas dataframe of the monthly variable.

        :rtype: pandas.DataFrame
        """
        index=pd.Index(data=self._monthly_periods.get_periods(),
                       name='time_periods')
        column_level_names=('object_name','quantity','unit','value_type')
        # one column per statistic; all five share object/quantity/unit levels
        data=[self.values,
              self.min_values,
              self.get_min_times(),
              self.max_values,
              self.get_max_times()]
        columns=[[self.object_name]*5,
                 [self.quantity]*5,
                 [self.unit or '-']*5,
                 ['value','min_value','min_time','max_value','max_time']]
        columns=tuple(zip(*columns))
        data=tuple(zip(*data))
        df=pd.DataFrame(index=index,
                        data=data,
                        columns=pd.MultiIndex.from_tuples(columns,
                                                          names=column_level_names))
        return df

    def _get_component_times(self, days, hours, minutes):
        """Combines per-month day/hour/minute components with the month start times.

        Shared implementation for `get_max_times` and `get_min_times`.

        :rtype: tuple (datetime.datetime)
        """
        # assumes day/hour/minute are already valid datetime components
        # (an EnergyPlus "24:00" style hour would raise) — TODO confirm
        result=[]
        for start_time,day,hour,minute in zip(self._monthly_periods.get_start_times(),
                                              days,
                                              hours,
                                              minutes):
            result.append(datetime.datetime(start_time.year,
                                            start_time.month,
                                            day,
                                            hour,
                                            minute,
                                            tzinfo=start_time.tzinfo))
        return tuple(result)

    def get_max_times(self):
        """Returns the times when the maximum values occur.

        :rtype: tuple (datetime.datetime)
        """
        return self._get_component_times(self.max_days, self.max_hours, self.max_minutes)

    def get_min_times(self):
        """Returns the times when the minumum values occur.

        :rtype: tuple (datetime.datetime)
        """
        return self._get_component_times(self.min_days, self.min_hours, self.min_minutes)

    @property
    def max_days(self):
        """The day numbers for the maximum values of the monthly variable.

        :rtype: tuple (int)
        """
        return tuple(int(x) for x in self._data[6])

    @property
    def max_hours(self):
        """The hour numbers for the maximum values of the monthly variable.

        :rtype: tuple (int)
        """
        return tuple(int(x) for x in self._data[7])

    @property
    def max_minutes(self):
        """The minute numbers for the maximum values of the monthly variable.

        :rtype: tuple (int)
        """
        return tuple(int(x) for x in self._data[8])

    @property
    def max_values(self):
        """The maximum values of the monthly variable.

        :rtype: tuple (float)
        """
        return tuple(float(x) for x in self._data[5])

    @property
    def min_days(self):
        """The day numbers for the minimum values of the monthly variable.

        :rtype: tuple (int)
        """
        return tuple(int(x) for x in self._data[2])

    @property
    def min_hours(self):
        """The hour numbers for the minimum values of the monthly variable.

        :rtype: tuple (int)
        """
        return tuple(int(x) for x in self._data[3])

    @property
    def min_minutes(self):
        """The minute numbers for the minimum values of the monthly variable.

        :rtype: tuple (int)
        """
        return tuple(int(x) for x in self._data[4])

    @property
    def min_values(self):
        """The minimum values of the monthly variable.

        :rtype: tuple (float)
        """
        return tuple(float(x) for x in self._data[1])

    @property
    def object_name(self):
        """The object name of the monthly variable.

        :rtype: str
        """
        return self._variable_dictionary['object_name']

    def plot(self,
             ax=None,
             **kwargs):
        """Plots the monthly variable on the supplied axes.

        :param ax: An Axes instance.
            Optional, if not supplied then automatically created.
        :type ax: matplotlib.axes.Axes
        :param kwargs: Keyword arguments to be supplied to the matplotlib plot call.

        :returns: The Axes instance.
        :rtype: matplotlib.axes.Axes
        """
        if not ax:
            _, ax = plt.subplots(figsize=(16,4))  # figure handle is not needed
        # BUGFIX: kwargs were accepted but never forwarded to the plot call
        ax.plot(self.values, **kwargs)
        ax.set_title('%s' % (self.summary()))
        ax.set_ylabel('%s' % (self.unit))
        return ax

    @property
    def quantity(self):
        """The quantity of the monthly variable.

        :rtype: str
        """
        return self._variable_dictionary['quantity']

    @property
    def report_code(self):
        """The report code of the monthly variable.

        :rtype: str
        """
        return self._report_code

    def summary(self):
        """Returns a summary of the monthly variable.

        :rtype: str
        """
        return '%s - %s - %s (%s)' % (self.report_code,
                                      self.object_name,
                                      self.quantity,
                                      self.unit or '-')

    @property
    def unit(self):
        """The unit of the monthly variable.

        :rtype: str
        """
        return self._variable_dictionary['unit']

    @property
    def values(self):
        """The (mean) values of the monthly variable.

        :rtype: tuple (float)
        """
        return tuple(float(x) for x in self._data[0])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 11 12:02:21 2018
@author: Niccolo' Dal Santo
@email : niccolo.dalsanto@epfl.ch
"""
import pyorb_core.error_manager as em
import numpy as np
import pyorb_core.algebraic_utils as alg_ut
def default_theta_function(_param, _q):
    """Placeholder theta function; always raises through the error manager."""
    em.error_raiser('SystemError', 'default_theta_function',
                    "You are using the default theta function, please provide specific ones for your problem ")


def default_full_theta_function(_param):
    """Placeholder full theta function; always raises through the error manager."""
    em.error_raiser('SystemError', 'default_full_theta_function',
                    "You are using the default full theta function, please provide specific ones for your problem ")


class fom_problem:
    """Full-order model (FOM) problem driven by an external engine.

    Wraps a parameter handler plus an external computational engine and exposes
    the assembly/solve operations used by reduced-basis (RB) methods. Problem
    specific subclasses must override `define_theta_functions`.
    """

    def __init__(self, _parameter_handler, _external_engine=None, _fom_specifics=None):
        """Store the parameter handler; if an engine AND specifics are both
        given, configure the FOM and bind the theta functions."""
        if _external_engine is not None and _fom_specifics is not None:
            self.configure_fom(_external_engine, _fom_specifics)
            self.define_theta_functions()
        self.M_parameter_handler = _parameter_handler

    def get_theta_a(self, _param, _q):
        """Evaluate the q-th theta function of the bilinear form a."""
        return self.M_theta_a(_param, _q)

    def get_theta_f(self, _param, _q):
        """Evaluate the q-th theta function of the rhs functional f."""
        return self.M_theta_f(_param, _q)

    def get_full_theta_a(self, _param):
        """Evaluate all theta functions of a at `_param`."""
        return self.M_full_theta_a(_param)

    def get_full_theta_f(self, _param):
        """Evaluate all theta functions of f at `_param`."""
        return self.M_full_theta_f(_param)

    def define_theta_functions(self):
        """Must be overridden by a problem-specific subclass."""
        em.error_raiser('SystemError', 'fom_problem::define_theta_functions',
                        "You should define the theta function specific for your problem in the inherited class.")

    # initialize anything which needs to be specified for using the external engine
    def configure_fom(self, _external_engine, _fom_specifics):
        """Bind the external engine, store the specifics, initialise the
        simulation and cache the natural-norm matrix."""
        self.M_external_engine = _external_engine
        self.set_fom_specifics(_fom_specifics)
        self.M_external_engine.initialize_fom_simulation(_fom_specifics)
        self.M_configured_fom = True
        self.assemble_fom_natural_norm_matrix(self.M_fom_specifics)

    def assemble_fom_natural_norm_matrix(self, _fom_specifics):
        """Assemble and cache the natural-norm matrix via the engine."""
        self.check_configured_fom()
        self.M_natural_norm_matrix = self.M_external_engine.assemble_fom_natural_norm_matrix(self.M_fom_specifics)

    def set_fom_specifics(self, _fom_specifics):
        """Store the FOM specifics dictionary."""
        self.M_fom_specifics = _fom_specifics

    def update_fom_specifics(self, _fom_specifics_update):
        """Merge the given key/value pairs into the stored specifics."""
        print( "Updating the fom specifics dictionary" )
        for key in _fom_specifics_update:
            self.M_fom_specifics[key] = _fom_specifics_update[key]

    def clear_fom_specifics(self, _fom_specifics_update):
        """Remove the given keys from the stored specifics."""
        print( "Clearing the fom specifics dictionary" )
        for key in _fom_specifics_update:
            self.M_fom_specifics.pop(key)

    def check_configured_fom(self):
        """Raise (via the error manager) if `configure_fom` was never called."""
        if not self.M_configured_fom:  # was `== False`
            em.error_raiser('SystemError', 'fom_problem::retrieve_fom_data', "The fom problem has not been configured.")

    def solve_fom_problem(self, _param):
        """Solve the FOM for one parameter value via the engine."""
        self.check_configured_fom()
        return self.M_external_engine.solve_parameter(_param, self.M_fom_specifics)

    def compute_fom_product(self, _basis, _q, _operator):
        """Build one RB affine component and return its raw array."""
        print( "Performing compute_fom_product" )
        product = self.M_external_engine.build_rb_affine_component(_basis, _q, _operator, self.M_fom_specifics)
        return product.array

    def retrieve_fom_affine_components(self, _operator, _num_affine_components):
        """Build the FOM affine components for `_operator`."""
        self.check_configured_fom()
        return self.M_external_engine.build_fom_affine_components(_operator, _num_affine_components, self.M_fom_specifics)

    def retrieve_rb_affine_components(self, _operator):
        """Build the RB affine components for `_operator`."""
        self.check_configured_fom()
        return self.M_external_engine.build_rb_affine_components(_operator, self.M_fom_specifics)

    def assemble_fom_matrix(self, _param, _elements=None, _indices=None):
        """Assemble the FOM matrix (optionally restricted to elements/indices).

        BUGFIX: replaced mutable default arguments `[]` with `None`; omitted
        arguments are still forwarded to the engine as empty lists.
        """
        self.check_configured_fom()
        return self.M_external_engine.assemble_fom_matrix(
            _param, self.M_fom_specifics,
            [] if _elements is None else _elements,
            [] if _indices is None else _indices)

    def assemble_fom_rhs(self, _param, _elements=None, _indices=None):
        """Assemble the FOM right-hand side (same default handling as above)."""
        self.check_configured_fom()
        return self.M_external_engine.assemble_fom_rhs(
            _param, self.M_fom_specifics,
            [] if _elements is None else _elements,
            [] if _indices is None else _indices)

    def get_num_parameters(self):
        """Number of parameters, delegated to the parameter handler."""
        return self.M_parameter_handler.get_num_parameters()

    def generate_parameter(self):
        """Generate a new parameter value via the parameter handler."""
        return self.M_parameter_handler.generate_parameter()

    def get_parameter(self):
        """Fetch and cache the handler's current parameter value."""
        self.M_current_parameter = self.M_parameter_handler.get_parameter()
        return self.M_current_parameter

    def get_parameter_handler(self):
        """The wrapped parameter handler."""
        return self.M_parameter_handler

    def find_mdeim_elements_fom_specifics(self, _indices_mat):
        """Delegate MDEIM element lookup to the engine."""
        self.check_configured_fom()
        return self.M_external_engine.find_mdeim_elements_fom_specifics(self.M_fom_specifics, _indices_mat)

    def find_deim_elements_fom_specifics(self, _indices):
        """Delegate DEIM element lookup to the engine."""
        self.check_configured_fom()
        return self.M_external_engine.find_deim_elements_fom_specifics(self.M_fom_specifics, _indices)

    def compute_natural_norm(self, _uh):
        """Return the natural norm sqrt(u^T A u) using the cached norm matrix."""
        Auh = alg_ut.sparse_matrix_vector_mul(self.M_natural_norm_matrix, _uh)
        uh_norm = _uh.T.dot(Auh)
        return np.sqrt(uh_norm)

    # class-level defaults, overridden per instance during configuration
    M_parameter_handler = None
    M_configured_fom = False
    # engine used to perform offline computation relying on an external engine
    M_external_engine = None
    M_fom_specifics = None
    M_natural_norm_matrix = None
    # theta functions
    M_theta_a = default_theta_function
    M_theta_f = default_theta_function
    M_full_theta_a = default_full_theta_function
    M_full_theta_f = default_full_theta_function
    M_current_parameter = np.zeros( 0 )
|
#!/usr/bin/python
"""
add
"""
from containernet.net import Containernet
from containernet.node import DockerSta, Docker
from containernet.cli import CLI
from containernet.term import makeTerm
from mininet.log import info, setLogLevel
from mn_wifi.link import wmediumd
from mn_wifi.wmediumdConnector import interference
import sys
import os
import yaml
def topology(args):
    """Build and run the Containernet/mininet-wifi topology described by a
    YAML config file.

    ``args`` is expected to be ``sys.argv``: ``args[1]`` is the path of the
    config file; a ``-p`` flag anywhere in ``args`` disables plotting.
    """
    net = Containernet(link=wmediumd, wmediumd_mode=interference, noise_th=-91, fading_cof=3)
    if len(args) != 2:
        print("usage: network_config.py <config_file>")
    else:
        with open(args[1]) as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
    # NOTE(review): with a wrong argument count the code still falls through
    # to here with `config` unbound -> NameError; presumably the script is
    # always invoked with exactly one config file — TODO confirm/guard.
    models = config['gazebo_models']
    node_type = config['type']
    pose = config['pose']
    ip_list = config['ip_list']
    image = config['image']
    # collapse the configured image name parts into a single docker image tag
    string1 = ""
    string1 = string1.join(image)
    #print(string1)
    # print(imag)
    sta_list = []
    ap_list = []
    info('*** Adding docker containers\n')
    for idx, node in enumerate(models):
        # "x,y,z" position string as expected by mininet-wifi
        position = str(pose[idx]['position']['x']) + "," + str(pose[idx]['position']['y']) + "," + str(
            pose[idx]['position']['z'])
        #print(type(node), type(ip_list[idx]))
        if node_type[idx] == "STATIC":
            #sta_list.append(net.addStation('host%s' % idx, ip=ip_list[idx], mac='00:02:00:00:00:1%s' % idx,
            #                               cls=DockerSta, dimage=string1, cpu_shares=20, position=position))
            # reuse at most four SSIDs, round-robin over the APs
            ap = idx%4
            ap_list.append(net.addAccessPoint(node, ssid='new-ssid%s' %ap, mode='g', position=position,
                                              failMode="standalone"))
            #FIXME add channel parameters as well to the config file.
            #net.addLink(sta_list[idx], ap_list[idx])#, cls=TCLink)
        elif node_type[idx] == "MOBILE":
            sta_list.append(net.addStation(node, ip=ip_list[idx],
                                           cls=DockerSta, dimage=string1, cpu_shares=20, position=position))
            #ap_list.append(0)
    #T_op= net.addStation('tele', ip='10.0.0.1', mac='00:02:00:00:00:20',
    #                     cls=DockerSta, dimage="cornet:focalfoxyNWH", cpu_shares=20, position='2,10,0')
    #c0 = net.addController('c0')
    h1 = net.addHost('h1', ip='10.0.0.1/24', cls=Docker, dimage="cornet:focalfoxyNWH", cpu_shares=20)
    info("*** Configuring Propagation Model\n")
    net.setPropagationModel(model="logDistance", exp=5.5)
    # FIXME add propagation model as well to the config file.
    #info('*** Adding switches\n')
    #s1 = net.addSwitch('s1')
    #for ap in ap_list:
    #    net.addLink(s1 , ap)
    info('*** Configuring WiFi nodes\n')
    net.configureWifiNodes()
    if '-p' not in args:
        net.plotGraph(max_x=100, max_y=100)
    # enable ssh inside every container so they can be reached directly
    for sta in sta_list:
        sta.cmd('service ssh restart')
    h1.cmd('service ssh restart')
    info('*** Starting network\n')
    net.build()
    for ap in ap_list:
        #if ap != 0:
        ap.start([])
        # flood ARP so stations on standalone APs can resolve each other
        ap.cmd("ovs-ofctl add-flow %s priority=1,arp,actions=flood" % ap)
    net.socketServer(ip='127.0.0.1', port=12345)
    info('*** Running CLI\n')
    CLI(net)
    # restore the host's network-manager (stopped in the __main__ block)
    os.system('sudo service network-manager start')
    info('*** Stopping network\n')
    net.stop()
if __name__ == '__main__':
    # network-manager interferes with mininet-wifi interfaces; stop it for the
    # duration of the run (topology() restarts it after the CLI exits).
    os.system('sudo systemctl stop network-manager')
    setLogLevel('info')
    topology(sys.argv)
|
# _*_ coding: utf-8 _*_
# !/usr/bin/env python3
"""
Mictlantecuhtli: A Multi-Cloud Global Probe Mesh Creator.
@author: Collisio-Adolebitque
"""
from .__main__ import *
__version__ = '0.0.1'
# Public API names re-exported from .__main__ via the star import above.
__all__ = ["CommandParser", "Terraform", "AWS", "GCP", "Azure", "Alibaba"]
|
'''
==========================
Hyperparameter Selection 2
==========================
This example demonstrates how to do model selection in a pipeline where segments are learned directly by a neural network
'''
# Author: David Burns
# License: BSD
import matplotlib.pyplot as plt
from keras.layers import Dense, LSTM, Conv1D
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
from seglearn.datasets import load_watch
from seglearn.pipe import Pype
from seglearn.split import TemporalKFold
from seglearn.transform import SegmentX
def crnn_model(width=100, n_vars=6, n_classes=7, conv_kernel_size=5,
               conv_filters=10, lstm_units=10):
    """Build and compile a CRNN classifier: one Conv1D layer feeding one LSTM
    layer, followed by a softmax output over ``n_classes``."""
    net = Sequential()
    # convolutional front-end over (width, n_vars) segments
    net.add(Conv1D(filters=conv_filters, kernel_size=conv_kernel_size,
                   padding='valid', activation='relu', input_shape=(width, n_vars)))
    # recurrent layer with light dropout on inputs and recurrent state
    net.add(LSTM(units=lstm_units, dropout=0.1, recurrent_dropout=0.1))
    net.add(Dense(n_classes, activation="softmax"))
    net.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net
# load the data
data = load_watch()
X = data['X']
y = data['y']
# temporal splitting of data
splitter = TemporalKFold(n_splits=3)
Xs, ys, cv = splitter.split(X, y)
# create a segment learning pipeline
width = 100
pipe = Pype([('seg', SegmentX()),
             ('crnn', KerasClassifier(build_fn=crnn_model, epochs=1, batch_size=256, verbose=0))])
# create a parameter dictionary using the sklearn API
#
# you can also set a parameter to be always equal to another parameter, by setting its value to
# parameter name to track (this is an extension from sklearn)
#
# note that if you want to set a parameter to a single value, it will still need to be as a list
par_grid = {'seg__width': [50, 100, 200],
            'seg__overlap': [0.],
            'crnn__width': ['seg__width']}
clf = GridSearchCV(pipe, par_grid, cv=cv, verbose=2)
clf.fit(Xs, ys)
# plot mean CV score with a +/- one-std band across the searched widths
scores = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
plt.plot(par_grid['seg__width'], scores, '-o')
plt.title("Grid Search Scores")
plt.xlabel("Width [s]")
plt.ylabel("CV Average Score")
plt.fill_between(par_grid['seg__width'], scores - stds, scores + stds, alpha=0.2, color='navy')
plt.show()
|
def double_pole_fitness_func(target_len, cart, net):
    """Create a fitness function for the double-pole balancing task.

    The returned callable loads a genome into ``net`` and scores it by
    evaluating ``net`` against ``cart`` for ``target_len``.
    """
    def evaluate_genes(genes):
        net.init_weight(genes)
        return net.evaluate(cart, target_len)

    return evaluate_genes
|
import pyperclip
def GetTime(text_header):
    """Prompt for hours/minutes/seconds and return the total in seconds.

    Prints ``text_header`` first, reads the three components from stdin,
    then prints and returns their sum in seconds.
    """
    # BUGFIX/portability: the script used the Python-2-only raw_input();
    # fall back to input() so it also runs on Python 3.
    try:
        read_line = raw_input
    except NameError:
        read_line = input
    print(text_header)
    hours = read_line("Hours: ")
    minutes = read_line("Minutes: ")
    seconds = read_line("Seconds: ")
    total_seconds = (int(hours) * 3600) + (int(minutes) * 60) + int(seconds)
    print(total_seconds)
    return total_seconds
url = raw_input("Youtube Video URL = ")
# Repeatedly build YouTube iframe embed snippets for successive time ranges of
# the same video; each snippet is copied to the clipboard and printed. The
# loop runs until the process is interrupted. (Python 2 script: raw_input.)
while True:
    start_time = GetTime("Start Time");
    end_time = GetTime("End Time");
    html_embed_code = '<iframe width="300" height="169" src="' + url + '?rel=0&start=' + str(start_time) + ';end=' + str(end_time) + '" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>'
    pyperclip.copy(html_embed_code)
    print(html_embed_code)
    print('\n\n')
|
def predict(x, u, A, B, P, Q):
    """
    Prediction step of the Kalman filter
    Input:
        x - state vector
        u - input vector
        A - process matrix
        B - input-to-state matrix
        P - state covariance matrix
        Q - evolution noise
    Returns:
        x - predicted state
        P - predicted covariance
    """
    # propagate the state through the process model plus the control input
    x_pred = A.dot(x) + B.dot(u)
    # propagate the covariance and add the process noise
    P_pred = A.dot(P).dot(A.T) + Q
    return x_pred, P_pred
def update(y, xpls, C, P, R):
    """
    Update step of the Kalman filter
    Input:
        y - measurement
        xpls - predicted state
        C - measurement matrix
        P - covariance matrix
        R - measurement noise
    Returns:
        x - updated step after measurement
        P - updated covariance
        K - Kalman gain
    """
    # Kalman gain from the innovation covariance (pinv tolerates singularity)
    innovation_cov = C.dot(P).dot(C.T) + R
    K = P.dot(C.T).dot(np.linalg.pinv(innovation_cov))
    # Update estimate via measurement
    residual = y - C.dot(xpls)
    x = xpls + K.dot(residual)
    # Update error covariance
    P = P - K.dot(C).dot(P)
    return x, P, K
# Read an integer N from stdin and print N, N-1, ..., 1, one per line.
n = int(input())
for remaining in range(n, 0, -1):
    print(remaining)
|
import os
import sys
import unittest
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
try:
import numpy
has_numpy = True
except ImportError:
has_numpy = False
sys.path.insert(1, '../')
import f90nml
from f90nml.fpy import pybool
from f90nml.namelist import Namelist
from f90nml.findex import FIndex
class Test(unittest.TestCase):
    def setUp(self):
        """Define the expected Python representation of every reference
        namelist file used by the tests, and remove any stale ``tmp.nml``
        left behind by a previously aborted run."""
        # empty / null-value namelists
        self.empty_file = {}
        self.empty_nml = {'empty_nml': {}}
        self.null_nml = {
            'null_nml': {'null_value': None},
            'null_comma_nml': {'null_comma': None},
            'null_nocomma_rpt_nml': {
                'null_one': None,
                'null_two': None,
            }
        }
        self.unset_nml = {
            'unset_nml': {
                'x': None,
                'y': None
            }
        }
        # scalar value types
        self.types_nml = {
            'types_nml': {
                'v_integer': 1,
                'v_float': 1.0,
                'v_complex': 1+2j,
                'v_logical': True,
                'v_string': 'Hello',
            }
        }
        # 1-D vectors with various index/stride/null combinations
        self.vector_nml = {
            'vector_nml': {
                'v': [1, 2, 3, 4, 5],
                'v_idx': [1, 2, 3, 4],
                'v_idx_ooo': [1, 2, 3, 4],
                'v_range': [1, 2, 3, 4],
                'v_start_zero': [1, 2, 3, 4],
                'v_start_minusone': [1, 2, 3, 4, 5],
                'v_zero_adj': [1, None, 3, 4],
                'v_zero_adj_ooo': [1, None, 3, 4],
                'v_implicit_start': [1, 2, 3, 4],
                'v_implicit_end': [1, 2, 3, 4],
                'v_implicit_all': [1, 2, 3, 4],
                'v_null_start': [None, 2, 3, 4],
                'v_null_interior': [1, 2, None, 4],
                'v_null_end': [1, 2, 3, None],
                'v_zero': [1, 0, 3],
                'v_stride': [1, None, 3, None, 5, None, 7],
                'v_single': [1],
                'v_implicit_merge': [1, 2],
                'v_explicit_merge': [1, 2],
            }
        }
        # multi-dimensional arrays, column-major (Fortran default) ordering
        self.multidim_nml = {
            'multidim_nml': {
                'v2d': [[1, 2], [3, 4]],
                'v3d': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                'w3d': [[[1, 2, 3, 4],
                         [5, 6, 7, 8],
                         [9, 10, 11, 12]],
                        [[13, 14, 15, 16],
                         [17, 18, 19, 20],
                         [21, 22, 23, 24]]],
                'v2d_explicit': [[1, 2], [3, 4]],
                'v2d_outer': [[1], [2], [3], [4]],
                'v2d_inner': [[1, 2, 3, 4]],
                'v2d_sparse': [[1, 2], [], [5, 6]]
            }
        }
        # the same file parsed with row_major=True (transposed layout)
        self.md_rowmaj_nml = {
            'multidim_nml': {
                'v2d': [[1, 3], [2, 4]],
                'v3d': [[[1, 5], [3, 7]], [[2, 6], [4, 8]]],
                'w3d': [[[1, 13], [5, 17], [9, 21]],
                        [[2, 14], [6, 18], [10, 22]],
                        [[3, 15], [7, 19], [11, 23]],
                        [[4, 16], [8, 20], [12, 24]]],
                'v2d_explicit': [[1, 3], [2, 4]],
                'v2d_outer': [[1, 2, 3, 4]],
                'v2d_inner': [[1], [2], [3], [4]],
                'v2d_sparse': [[1, None, 5], [2, None, 6]]
            }
        }
        self.default_one_index_nml = {
            'default_index_nml': {
                'v': [1, 2, 3, 4, 5]
            }
        }
        self.default_zero_index_nml = {
            'default_index_nml': {
                'v': [1, 2, None, 3, 4, 5]
            }
        }
        self.global_index_nml = {
            'global_index_nml': {
                'v_zero': [1, 2, 3, 4],
                'v_neg': [1, 2, 3, 4],
                'v_pos': [None, 1, 2, 3, 4]
            }
        }
        # floating-point literal formats (exponent markers, missing digits, ...)
        self.float_nml = {
            'float_nml': {
                'v_float': 1.,
                'v_decimal_start': .1,
                'v_decimal_end': 1.,
                'v_negative': -1.,
                'v_single': 1.,
                'v_double': 1.,
                'v_single_upper': 1.,
                'v_double_upper': 1.,
                'v_positive_index': 10.,
                'v_negative_index': 0.1,
                'v_no_exp_pos': 1.,
                'v_no_exp_neg': 1.,
                'v_no_exp_pos_dot': 1.,
                'v_no_exp_neg_dot': 1.,
                'v_neg_no_exp_pos': -1.,
                'v_neg_no_exp_neg': -1.,
            }
        }
        # string quoting/escaping variants
        self.string_nml = {
            'string_nml': {
                'str_basic': 'hello',
                'str_no_delim': 'hello',
                'str_no_delim_no_esc': "a''b",
                'single_esc_delim': "a 'single' delimiter",
                'double_esc_delim': 'a "double" delimiter',
                'double_nested': "''x'' \"y\"",
                'str_list': ['a', 'b', 'c'],
                'slist_no_space': ['a', 'b', 'c'],
                'slist_no_quote': ['a', 'b', 'c'],
                'slash': 'back\\slash',
            }
        }
        # Fortran derived types map to nested dicts (lists of dicts for arrays)
        self.dtype_nml = {
            'dtype_nml': {
                'dt_scalar': {'val': 1},
                'dt_stack': {'outer': {'inner': 2}},
                'dt_vector': {'vec': [1, 2, 3]}
            },
            'dtype_multi_nml': {
                'dt': {
                    'x': 1,
                    'y': 2,
                    'z': 3,
                }
            },
            'dtype_nested_nml': {
                'f': {
                    'g': {
                        'x': 1,
                        'y': 2,
                        'z': 3,
                    }
                }
            },
            'dtype_field_idx_nml': {
                'f': {
                    'x': [1, 2, 3]}
            },
            'dtype_vec_nml': {
                'a': {
                    'b': [
                        {'c': 1, 'd': 2},
                        {'c': 3, 'd': 4},
                        {'c': 5, 'd': 6}
                    ]
                }
            },
            'dtype_sparse_vec_nml': {
                'a': {
                    'b': [{'c': 2}]  # NOTE: start_index is 2
                }
            },
            'dtype_single_value_vec_nml': {
                'a': [{'b': 1}]
            },
            'dtype_single_vec_merge_nml': {
                'a': {
                    'b': [{'c': 1, 'd': 2}]
                }
            }
        }
        self.dtype_case_nml = {
            'dtype_mixed': {
                'b': {
                    'c_d_e': [{'id': 1}, {'id': 2}]
                }
            },
            'dtype_list_in_list': {
                'b': {
                    'c': [
                        {'id': 1},
                        {'id': 2},
                        {'id': 3},
                        {'id': 4, 'd': {'e': [10, 11]}}
                    ]
                }
            },
            'dtype_upper_scalar': {
                'b': {
                    'c': 1,
                    'd': [{'id': 2}],
                }
            },
            'dtype_upper_list': {
                'b': {
                    'c': [{'id': 1}, {'id': 2}]
                }
            },
            'dtype_index_overwrite': {
                'b': {
                    'c': [{'d': 1, 'e': 2, 'f': 3, 'g': 4, 'h': 5}]
                }
            },
            'dtype_list_staggered': {
                'b': {
                    'c': [
                        {'a': 1}, None, None, {'a': 1},
                        None, None, None, {'a': 1}
                    ]
                }
            }
        }
        # repetition broadcasting, e.g. 3*1 expanding to three elements
        self.bcast_nml = {
            'bcast_nml': {
                'x': [2.0, 2.0],
                'y': [None, None, None],
                'z': [True, True, True, True],
            },
            'bcast_endnull_nml': {
                'x': [2.0, 2.0],
                'y': [None, None, None],
            },
            'bcast_mixed_nml': {
                'x': [1, 1, 1, 2, 3, 4],
                'y': [1, 1, 1, 2, 2, 3],
            }
        }
        # '!' comment handling, including '!' inside strings
        self.comment_nml = {
            'comment_nml': {
                'v_cmt_inline': 123,
                'v_cmt_in_str': 'This token ! is not a comment',
                'v_cmt_after_str': 'This ! is not a comment',
            }
        }
        self.comment_alt_nml = {
            'comment_alt_nml': {
                'x': 1,
                'z': 3}
        }
        # repeated group names parse to a list of group dicts
        self.grp_repeat_nml = {
            'grp_repeat_nml': [{'x': 1}, {'x': 2}],
            'case_check_nml': [{'y': 1}, {'y': 2}],
        }
        self.f77_nml = {
            'f77_nml': {'x': 123},
            'next_f77_nml': {'y': 'abc'},
        }
        self.dollar_nml = {'dollar_nml': {'v': 1.}}
        self.multiline_nml = {
            'multiline_nml': {
                'x': [
                    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                    1, 1, 1, 1, 1, 1, 1, 1
                ]
            }
        }
        self.ext_token_nml = {'ext_token_nml': {'x': 1}}
        self.repatch_nml = {
            'repatch_nml': {
                'x': [5, 6],
                'y': {'z': 7}
            }
        }
        self.winfmt_nml = {'blah': {'blah': 1}}
        # numpy scalar round-tripping is only checked when numpy is available
        if has_numpy:
            self.numpy_nml = {
                'numpy_nml': OrderedDict((
                    ('np_integer', numpy.int64(1)),
                    ('np_float', numpy.float64(1.0)),
                    ('np_complex', numpy.complex128(1+2j)),
                    )
                )
            }
        # clean up any scratch file left by a previously aborted test run
        if os.path.isfile('tmp.nml'):
            os.remove('tmp.nml')
# Support functions
def assert_file_equal(self, source_fname, target_fname):
with open(source_fname) as source:
with open(target_fname) as target:
source_str = source.read()
target_str = target.read()
self.assertEqual(source_str, target_str)
    def assert_write(self, nml, target_fname):
        """Write `nml` both by path and by file object, comparing each result
        against `target_fname`."""
        self.assert_write_path(nml, target_fname)
        self.assert_write_file(nml, target_fname)
def assert_write_path(self, nml, target_fname):
tmp_fname = 'tmp.nml'
f90nml.write(nml, tmp_fname)
try:
self.assert_file_equal(tmp_fname, target_fname)
finally:
os.remove(tmp_fname)
def assert_write_file(self, nml, target_fname):
    """Write *nml* through an open file object and compare with
    *target_fname*; also verify that f90nml does not close a caller-owned
    file handle."""
    tmp_fname = 'tmp.nml'
    with open(tmp_fname, 'w') as tmp_file:
        f90nml.write(nml, tmp_file)
        # The handle was opened by us, so f90nml must leave it open.
        self.assertFalse(tmp_file.closed)
    try:
        self.assert_file_equal(tmp_fname, target_fname)
    finally:
        os.remove(tmp_fname)
# Tests
def test_empty_file(self):
test_nml = f90nml.read('empty_file')
self.assertEqual(self.empty_file, test_nml)
def test_empty_nml(self):
test_nml = f90nml.read('empty.nml')
self.assertEqual(self.empty_nml, test_nml)
self.assert_write(test_nml, 'empty.nml')
def test_null(self):
test_nml = f90nml.read('null.nml')
self.assertEqual(self.null_nml, test_nml)
self.assert_write(test_nml, 'null_target.nml')
def test_unset(self):
test_nml = f90nml.read('unset.nml')
self.assertEqual(self.unset_nml, test_nml)
self.assert_write(test_nml, 'unset.nml')
def test_types(self):
test_nml = f90nml.read('types.nml')
self.assertEqual(self.types_nml, test_nml)
self.assert_write(test_nml, 'types.nml')
def test_vector(self):
test_nml = f90nml.read('vector.nml')
self.assertEqual(self.vector_nml, test_nml)
self.assert_write(test_nml, 'vector_target.nml')
def test_multidim(self):
test_nml = f90nml.read('multidim.nml')
self.assertEqual(self.multidim_nml, test_nml)
self.assert_write(test_nml, 'multidim_target.nml')
def test_rowmaj_multidim(self):
test_nml = f90nml.read('multidim.nml', row_major=True)
self.assertEqual(self.md_rowmaj_nml, test_nml)
def test_flag_syntax(self):
self.assertRaises(ValueError, f90nml.read, 'index_empty.nml',
row_major='abc')
self.assertRaises(ValueError, f90nml.read, 'index_empty.nml',
strict_logical='abc')
def test_float(self):
test_nml = f90nml.read('float.nml')
self.assertEqual(self.float_nml, test_nml)
self.assert_write(test_nml, 'float_target.nml')
def test_string(self):
test_nml = f90nml.read('string.nml')
self.assertEqual(self.string_nml, test_nml)
self.assert_write(test_nml, 'string_target.nml')
def test_dtype(self):
test_nml = f90nml.read('dtype.nml')
self.assertEqual(self.dtype_nml, test_nml)
self.assert_write(test_nml, 'dtype_target.nml')
def test_dtype_case(self):
test_nml = f90nml.read('dtype_case.nml')
self.assertEqual(self.dtype_case_nml, test_nml)
self.assert_write(test_nml, 'dtype_case_target.nml')
def test_bcast(self):
test_nml = f90nml.read('bcast.nml')
self.assertEqual(self.bcast_nml, test_nml)
self.assert_write(test_nml, 'bcast_target.nml')
def test_comment(self):
test_nml = f90nml.read('comment.nml')
self.assertEqual(self.comment_nml, test_nml)
self.assert_write(test_nml, 'comment_target.nml')
def test_comment_alt(self):
parser = f90nml.Parser()
parser.comment_tokens = '#'
test_nml = parser.read('comment_alt.nml')
self.assertEqual(self.comment_alt_nml, test_nml)
def test_grp_repeat(self):
test_nml = f90nml.read('grp_repeat.nml')
self.assertEqual(self.grp_repeat_nml, test_nml)
self.assert_write(test_nml, 'grp_repeat_target.nml')
def test_f77(self):
test_nml = f90nml.read('f77.nml')
self.assertEqual(self.f77_nml, test_nml)
self.assert_write(test_nml, 'f77_target.nml')
def test_dollar(self):
test_nml = f90nml.read('dollar.nml')
self.assertEqual(self.dollar_nml, test_nml)
self.assert_write(test_nml, 'dollar_target.nml')
def test_multiline(self):
test_nml = f90nml.read('multiline.nml')
self.assertEqual(self.multiline_nml, test_nml)
self.assert_write(test_nml, 'multiline.nml')
def test_multiline_index(self):
test_nml = f90nml.read('multiline_index.nml')
self.assertEqual(self.multiline_nml, test_nml)
self.assert_write(test_nml, 'multiline_index.nml')
def test_ext_token(self):
test_nml = f90nml.read('ext_token.nml')
self.assertEqual(self.ext_token_nml, test_nml)
def test_write_existing_file(self):
    """Namelist.write must refuse to overwrite an existing file.

    The scratch file is removed in a ``finally`` so a failed assertion
    does not leak ``tmp.nml`` into subsequent tests (the original left
    the file behind on failure).
    """
    tmp_fname = 'tmp.nml'
    open(tmp_fname, 'w').close()
    try:
        test_nml = f90nml.read('empty.nml')
        self.assertRaises(IOError, test_nml.write, tmp_fname)
    finally:
        os.remove(tmp_fname)
def test_pop_key(self):
test_nml = f90nml.read('empty.nml')
test_nml.pop('empty_nml')
self.assertEqual(test_nml, f90nml.namelist.Namelist())
def test_patch_paths(self):
patch_nml = f90nml.read('types_patch.nml')
f90nml.patch('types.nml', patch_nml, 'tmp.nml')
test_nml = f90nml.read('tmp.nml')
try:
self.assertEqual(test_nml, patch_nml)
finally:
os.remove('tmp.nml')
def test_patch_files(self):
patch_nml = f90nml.read('types_patch.nml')
with open('types.nml') as f_in:
with open('tmp.nml', 'w') as f_out:
f90nml.patch(f_in, patch_nml, f_out)
self.assertFalse(f_in.closed)
self.assertFalse(f_out.closed)
try:
test_nml = f90nml.read('tmp.nml')
self.assertEqual(test_nml, patch_nml)
finally:
os.remove('tmp.nml')
def test_patch_case(self):
patch_nml = f90nml.read('types_patch.nml')
f90nml.patch('types_uppercase.nml', patch_nml, 'tmp.nml')
test_nml = f90nml.read('tmp.nml')
try:
self.assertEqual(test_nml, patch_nml)
finally:
os.remove('tmp.nml')
def test_patch_valueerror(self):
self.assertRaises(ValueError, f90nml.patch, 'types.nml', 'xyz',
'tmp.nml')
def test_repatch(self):
f90nml.patch('repatch.nml', self.repatch_nml, 'tmp.nml')
test_nml = f90nml.read('tmp.nml')
try:
self.assertEqual(test_nml, self.repatch_nml)
finally:
os.remove('tmp.nml')
def test_default_patch(self):
patch_nml = f90nml.read('types_patch.nml')
f90nml.patch('types.nml', patch_nml)
test_nml = f90nml.read('types.nml~')
try:
self.assertEqual(test_nml, patch_nml)
finally:
os.remove('types.nml~')
# The above behavior is only for paths, not files
with open('types.nml') as nml_file:
self.assertRaises(ValueError, f90nml.patch, nml_file, patch_nml)
def test_no_selfpatch(self):
patch_nml = f90nml.read('types_patch.nml')
self.assertRaises(ValueError, f90nml.patch,
'types.nml', patch_nml, 'types.nml')
def test_comment_patch(self):
nml = {'comment_nml': {'v_cmt_inline': 456}}
try:
f90nml.patch('comment.nml', nml, 'tmp.nml')
self.assert_file_equal('comment_patch.nml', 'tmp.nml')
finally:
os.remove('tmp.nml')
def test_default_index(self):
parser = f90nml.Parser()
parser.default_start_index = 1
test_nml = parser.read('default_index.nml')
self.assertEqual(self.default_one_index_nml, test_nml)
parser.default_start_index = 0
test_nml = parser.read('default_index.nml')
self.assertEqual(self.default_zero_index_nml, test_nml)
def test_global_index(self):
parser = f90nml.Parser()
parser.global_start_index = 1
test_nml = parser.read('global_index.nml')
self.assertEqual(self.global_index_nml, test_nml)
def test_index_syntax(self):
self.assertRaises(ValueError, f90nml.read, 'index_empty.nml')
self.assertRaises(ValueError, f90nml.read, 'index_bad.nml')
self.assertRaises(ValueError, f90nml.read, 'index_bad_start.nml')
self.assertRaises(ValueError, f90nml.read, 'index_empty_end.nml')
self.assertRaises(ValueError, f90nml.read, 'index_bad_end.nml')
self.assertRaises(ValueError, f90nml.read, 'index_empty_stride.nml')
self.assertRaises(ValueError, f90nml.read, 'index_bad_stride.nml')
self.assertRaises(ValueError, f90nml.read, 'index_zero_stride.nml')
def test_f90repr(self):
    """Check the Fortran representation of each supported Python type."""
    nml = Namelist()
    cases = [
        (1, '1'),
        (1., '1.0'),
        (1 + 2j, '(1.0, 2.0)'),
        (True, '.true.'),
        (False, '.false.'),
        ('abc', "'abc'"),
    ]
    for value, expected in cases:
        self.assertEqual(nml.f90repr(value), expected)
    # Containers have no scalar Fortran representation.
    for unsupported in ({}, [], set()):
        self.assertRaises(ValueError, nml.f90repr, unsupported)
def test_pybool(self):
    """pybool: strict and lax parsing of Fortran logical tokens."""
    for token in ('true', '.true.', 't', '.t.'):
        self.assertEqual(pybool(token), True)
    for token in ('false', '.false.', 'f', '.f.'):
        self.assertEqual(pybool(token), False)
    # With strict checking disabled, only the leading letter matters.
    for token in ('ture', '.t'):
        self.assertEqual(pybool(token, strict_logical=False), True)
    for token in ('flase', '.f'):
        self.assertEqual(pybool(token, strict_logical=False), False)
    # The same malformed tokens are rejected under strict checking.
    for token in ('ture', '.t', 'flase', '.f'):
        self.assertRaises(ValueError, pybool, token)
    # Tokens with no recognizable leading letter always fail.
    for token in ('g', '.', 'xyz'):
        self.assertRaises(ValueError, pybool, token, strict_logical=False)
def test_close_patch_on_error(self):
    """A failed patch must still close the output file so it can be removed.

    Cleanup moved to a ``finally`` (guarded, since the output may not have
    been created if patch fails early) so a failed assertion does not leak
    ``tmp.nml`` into subsequent tests.
    """
    patch = {'tmp_nml': {'tmp_val': 0}}
    try:
        self.assertRaises(ValueError, f90nml.patch, 'index_empty.nml', patch,
                          'tmp.nml')
    finally:
        if os.path.isfile('tmp.nml'):
            os.remove('tmp.nml')
def test_indent(self):
test_nml = f90nml.read('types.nml')
test_nml.indent = 2
self.assert_write(test_nml, 'types_indent_2.nml')
test_nml.indent = '\t'
self.assert_write(test_nml, 'types_indent_tab.nml')
self.assertRaises(ValueError, setattr, test_nml, 'indent', -4)
self.assertRaises(ValueError, setattr, test_nml, 'indent', 'xyz')
self.assertRaises(TypeError, setattr, test_nml, 'indent', [1, 2, 3])
def test_colwidth(self):
test_nml = f90nml.read('multiline.nml')
test_nml.colwidth = 40
self.assert_write(test_nml, 'multiline_colwidth.nml')
self.assertRaises(ValueError, setattr, test_nml, 'colwidth', -1)
self.assertRaises(TypeError, setattr, test_nml, 'colwidth', 'xyz')
def test_end_comma(self):
test_nml = f90nml.read('types.nml')
test_nml.end_comma = True
self.assert_write(test_nml, 'types_end_comma.nml')
self.assertRaises(TypeError, setattr, test_nml, 'end_comma', 'xyz')
def test_uppercase(self):
test_nml = f90nml.read('types.nml')
test_nml.uppercase = True
self.assert_write(test_nml, 'types_uppercase.nml')
self.assertRaises(TypeError, setattr, test_nml, 'uppercase', 'xyz')
def test_floatformat(self):
test_nml = f90nml.read('float.nml')
test_nml.floatformat = '.3f'
self.assert_write(test_nml, 'float_format.nml')
self.assertRaises(TypeError, setattr, test_nml, 'floatformat', 123)
def test_logical_repr(self):
test_nml = f90nml.read('logical.nml', strict_logical=False)
test_nml.true_repr = 'T'
test_nml.false_repr = 'F'
self.assertEqual(test_nml.false_repr, test_nml.logical_repr[0])
self.assertEqual(test_nml.true_repr, test_nml.logical_repr[1])
self.assert_write(test_nml, 'logical_repr.nml')
test_nml.logical_repr = 'F', 'T'
self.assert_write(test_nml, 'logical_repr.nml')
self.assertRaises(TypeError, setattr, test_nml, 'true_repr', 123)
self.assertRaises(TypeError, setattr, test_nml, 'false_repr', 123)
self.assertRaises(ValueError, setattr, test_nml, 'true_repr', 'xyz')
self.assertRaises(ValueError, setattr, test_nml, 'false_repr', 'xyz')
self.assertRaises(TypeError, setattr, test_nml, 'logical_repr', 'xyz')
self.assertRaises(ValueError, setattr, test_nml, 'logical_repr', [])
def test_findex_iteration(self):
rng = [(None, 5, None)]
fidx = iter(FIndex(rng))
for i, j in enumerate(fidx, start=1):
self.assertEqual(i, j[0])
def test_dict_write(self):
self.assert_write(self.types_nml, 'types_dict.nml')
def test_dict_assign(self):
test_nml = f90nml.Namelist()
test_nml['dict_group'] = {'a': 1, 'b': 2}
try:
test_nml.write('tmp.nml')
finally:
os.remove('tmp.nml')
def test_winfmt(self):
test_nml = f90nml.read('winfmt.nml')
self.assertEqual(self.winfmt_nml, test_nml)
if has_numpy:
def test_numpy_write(self):
self.assert_write(self.numpy_nml, 'numpy_types.nml')
if __name__ == '__main__':
    # Remove any stale scratch file left over from an aborted earlier run
    # before handing control to the unittest runner.
    if os.path.isfile('tmp.nml'):
        os.remove('tmp.nml')
    unittest.main()
|
import gym
from rlkit.data_management.obs_dict_replay_buffer import ObsDictRelabelingBuffer
from rlkit.torch.data_management.normalizer import CompositeNormalizer
from rlkit.torch.optim.mpi_adam import MpiAdam
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.relational.networks import *
from rlkit.torch.relational.modules import *
from rlkit.envs.multi_env_wrapper import MultiEnvWrapperHerTwinSAC
from rlkit.launchers.config import get_infra_settings
def stackonly(i):
    """Return True when environment index *i* (0-based) should use the
    "Stackonly" reward variant — i.e. only for block counts above 3
    (``i > 2``).  Replaces a verbose if/else returning literals.
    """
    return i > 2
def experiment(variant):
    """Build the Fetch block-construction env, relational networks, replay
    buffer and multi-env HER + TwinSAC trainer from *variant*, then train.

    NOTE(review): this function reads several module-level globals
    (``shared_dim``, ``mlp_hidden_sizes``, ``embedding_dim``, ``layer_norm``,
    ``num_graph_modules``) that are only bound inside the
    ``if __name__ == "__main__"`` block below — it therefore only works when
    the launcher runs it in the same process as this script; confirm before
    switching to a serializing launch mode.
    """
    # Imported for its side effect: registers the FetchBlockConstruction gym envs.
    import fetch_block_construction
    # NOTE(review): stackonly=True is hard-coded for this template env, while
    # the per-env list passed to MultiEnvWrapperHerTwinSAC below uses
    # stackonly(i) — confirm the mismatch is intentional (this env appears to
    # be used for spaces / replay-buffer setup only).
    env = gym.make(variant['env_id_template'].format(num_blocks=variant['replay_buffer_kwargs']['max_num_blocks'], stackonly=True))
    env.unwrapped.render_image_obs = False
    if variant['set_max_episode_steps']:
        env.env._max_episode_steps = variant['set_max_episode_steps']
    action_dim = env.action_space.low.size
    # Per-object observation layout: robot state + object state + goal.
    robot_dim = 10
    object_dim = 15
    goal_dim = 3
    object_total_dim = robot_dim + object_dim + goal_dim
    # One normalizer shared by the policy and all critics.
    shared_normalizer = CompositeNormalizer(object_dim + shared_dim + goal_dim,
                                            action_dim,
                                            default_clip_range=5,
                                            reshape_blocks=True,
                                            fetch_kwargs=dict(
                                                lop_state_dim=3,
                                                object_dim=object_dim,
                                                goal_dim=goal_dim
                                            ))
    policy = ReNNPolicy(
        input_module_kwargs=dict(
            normalizer=shared_normalizer,
            object_total_dim=object_total_dim,
            embedding_dim=64
        ),
        graph_module_kwargs=dict(
            object_total_dim=object_total_dim,
            embedding_dim=64
        ),
        readout_module_kwargs=dict(
            embedding_dim=64
        ),
        proj_kwargs=dict(
            hidden_sizes=mlp_hidden_sizes,
            obs_dim=variant['pooling_heads'] * embedding_dim,
            action_dim=action_dim,
            output_activation=torch.tanh,
            layer_norm=layer_norm,
        ),
        num_graph_modules=num_graph_modules,
    )
    # Twin Q-functions (TwinSAC): the graph module additionally consumes the
    # action, hence object_total_dim + action_dim.
    qf1 = ReNN(
        input_module_kwargs=dict(
            normalizer=shared_normalizer,
            object_total_dim=object_total_dim,
            embedding_dim=64
        ),
        graph_module_kwargs=dict(
            object_total_dim=object_total_dim + action_dim,
            embedding_dim=64
        ),
        readout_module_kwargs=dict(
            embedding_dim=64
        ),
        proj_class=Mlp,
        proj_kwargs=dict(
            hidden_sizes=mlp_hidden_sizes,
            output_size=1,
            input_size=variant['pooling_heads'] * embedding_dim,
            layer_norm=layer_norm
        ),
        num_graph_modules=num_graph_modules,
    )
    qf2 = ReNN(
        input_module_kwargs=dict(
            normalizer=shared_normalizer,
            object_total_dim=object_total_dim,
            embedding_dim=64
        ),
        graph_module_kwargs=dict(
            object_total_dim=object_total_dim + action_dim,
            embedding_dim=64
        ),
        readout_module_kwargs=dict(
            embedding_dim=64
        ),
        proj_class=Mlp,
        proj_kwargs=dict(
            hidden_sizes=mlp_hidden_sizes,
            output_size=1,
            input_size=variant['pooling_heads'] * embedding_dim,
            layer_norm=layer_norm
        ),
        num_graph_modules=num_graph_modules,
    )
    # State value function: no action input, same readout as the critics.
    vf = ReNN(
        input_module_kwargs=dict(
            normalizer=shared_normalizer,
            object_total_dim=object_total_dim,
            embedding_dim=64
        ),
        graph_module_kwargs=dict(
            object_total_dim=object_total_dim,
            embedding_dim=64
        ),
        readout_module_kwargs=dict(
            embedding_dim=64
        ),
        proj_class=Mlp,
        proj_kwargs=dict(
            hidden_sizes=mlp_hidden_sizes,
            output_size=1,
            input_size=variant['pooling_heads'] * embedding_dim,
            layer_norm=layer_norm
        ),
        num_graph_modules=num_graph_modules,
    )
    observation_key = 'observation'
    desired_goal_key = 'desired_goal'
    achieved_goal_key = desired_goal_key.replace("desired", "achieved")
    replay_buffer = ObsDictRelabelingBuffer(
        env=env,
        observation_key=observation_key,
        desired_goal_key=desired_goal_key,
        achieved_goal_key=achieved_goal_key,
        **variant['replay_buffer_kwargs']
    )
    # Uniform sampling probability across the per-block-count environments.
    env_p = [(1/(variant['replay_buffer_kwargs']['max_num_blocks'])) for i in range(variant['replay_buffer_kwargs']['max_num_blocks'])]
    algorithm = MultiEnvWrapperHerTwinSAC(
        env_names=[variant['env_id_template'].format(num_blocks=i+1, stackonly=stackonly(i)) for i in range(variant['replay_buffer_kwargs']['max_num_blocks'])],
        her_kwargs=dict(
            observation_key='observation',
            desired_goal_key='desired_goal',
            ** variant['her_kwargs']
        ),
        tsac_kwargs=dict(
            env=env,
            qf1=qf1,
            qf2=qf2,
            vf=vf,
            policy=policy,
            optimizer_class=MpiAdam,
        ),
        replay_buffer=replay_buffer,
        env_probabilities=env_p,
        **variant['algo_kwargs']
    )
    # NOTE(review): ``ptu`` is not imported by name in this file — presumably
    # provided by one of the wildcard imports above; confirm.
    algorithm.to(ptu.device)
    algorithm.train()
if __name__ == "__main__":
    docker_img = "negativereward_gpfg_uniformxy"
    # Rotation-controlled image variants use larger action/object/goal spaces.
    if "rotctrl" in docker_img:
        action_dim = 8
        object_dim = 16
        goal_dim = 7
    else:
        action_dim = 4
        object_dim = 15
        goal_dim = 3
    # NOTE(review): these module-level names are read directly by
    # experiment() above, so they exist only when this file runs as a script.
    shared_dim = 10
    num_graph_modules = 3
    num_query_heads = 1
    embedding_dim = 64
    layer_norm = True
    max_num_blocks = 6
    num_epochs_per_eval = 10
    max_path_len = 50 * max_num_blocks * 4
    max_episode_steps = 50 * max_num_blocks
    mlp_hidden_sizes = [64, 64, 64]
    mode = "here_no_doodad"
    instance_type = "c5.18xlarge"
    settings_dict = get_infra_settings(mode, instance_type=instance_type)
    variant = dict(
        algo_kwargs=dict(
            num_epochs=3000 * 10,
            max_path_length=max_path_len,
            batch_size=256,
            discount=0.98,
            save_algorithm=True,
            # collection_mode="online",
            # num_updates_per_env_step=1,
            collection_mode='batch',  # TODO: set these settings from now on
            num_updates_per_epoch=50 * max_num_blocks * 4,
            num_steps_per_epoch=50 * max_num_blocks * 4,  # Do one episode per block
            num_steps_per_eval=50 * max_num_blocks * 10,  # Do ten episodes per eval
            num_epochs_per_eval=10,  # TODO: change One episode per epoch, so this is roughly 10 episodes per eval * number of parallel episodes...
            num_epochs_per_param_save=10 * 5,  # TODO: set these settings for hypersweeps
            num_gpus=settings_dict['num_gpus'],
            # min_num_steps_before_training=10000,
            # SAC args start
            soft_target_tau=0.001,
            policy_lr=3E-4,
            qf_lr=3E-4,
            vf_lr=3E-4,
            grad_clip_max=1000
        ),
        replay_buffer_kwargs=dict(
            max_size=int(1e6),
            fraction_goals_rollout_goals=0.2,  # equal to k = 4 in HER paper
            fraction_goals_env_goals=0.0,
            num_relational=num_graph_modules,
            num_heads=num_query_heads,
            max_num_blocks=max_num_blocks
        ),
        render=False,
        env_id_template="FetchBlockConstruction_{num_blocks}Blocks_IncrementalReward_DictstateObs_42Rendersize_{stackonly}Stackonly-v1",
        doodad_docker_image=F"richardrl/fbc:{docker_img}",
        gpu_doodad_docker_image=F"richardrl/fbc:{docker_img}",
        save_video=False,
        save_video_period=50,
        num_relational_blocks=num_graph_modules,
        set_max_episode_steps=max_episode_steps,
        mlp_hidden_sizes=mlp_hidden_sizes,
        num_query_heads=num_query_heads,
        action_dim=action_dim,
        goal_dim=goal_dim,
        embedding_dim=embedding_dim,
        her_kwargs=dict(
            exploration_masking=True
        ),
        pooling_heads=1
    )
    # Only prompt for an experiment prefix when launching remotely.
    test_prefix = "test_" if mode == "here_no_doodad" else input("Prefix: ")
    print(f"test_prefix: {test_prefix}")
    run_experiment(
        experiment,
        exp_prefix=F"{test_prefix}alpha_maxnumblocks{max_num_blocks}_numrelblocks{num_graph_modules}_nqh{num_query_heads}_dockimg{docker_img}",  # Make sure no spaces..
        region="us-west-2",
        mode=mode,
        variant=variant,
        gpu_mode=settings_dict['gpu_mode'],
        spot_price=10,
        snapshot_mode='gap_and_last',
        snapshot_gap=100,
        num_exps_per_instance=1,
        instance_type=instance_type,
        python_cmd=F"mpirun --allow-run-as-root -np {settings_dict['num_parallel_processes']} python"
    )
import swgpy
from swgpy.object import *
from swgpy.sui import RadialMenu, RadialOptions, RadialOptionsList, RadialIdentifier
from swgpy.utility import vector3, quat
class PyRadialMenu(RadialMenu):
    """Radial menu for mountable vehicle objects.

    NOTE(review): ``ACTION`` (used in handleRadial) is not imported in this
    file — presumably injected into the script scope by the swgpy runtime;
    confirm.
    """
    def buildRadial(self, owner, target, radials):
        """Build the radial option list shown to *owner* for vehicle *target*."""
        radial_list = RadialOptionsList()
        # Everyone can examine the vehicle.
        radial_list.append(RadialOptions(0, RadialIdentifier.examine, 1, ''))
        if owner.id == target.owner_id:
            # Owner sees "exit" while mounted, otherwise "enter",
            # and can always store the vehicle.
            if owner.container().id == target.id:
                radial_list.append(RadialOptions(0, RadialIdentifier.serverVehicleExit, 3, "@pet/pet_menu:menu_enter_exit"))
            else:
                radial_list.append(RadialOptions(0, RadialIdentifier.serverVehicleEnter, 3, "@pet/pet_menu:menu_enter_exit"))
            radial_list.append(RadialOptions(0, RadialIdentifier.vehicleStore, 3, "@pet/pet_menu:menu_store"))
        return radial_list
    def handleRadial(self, owner, target, action):
        """Dispatch a selected radial *action*; only the vehicle owner may act."""
        if owner.id == target.owner_id:
            if action == RadialIdentifier.serverVehicleExit or action == RadialIdentifier.serverVehicleEnter:
                if owner.container().id == target.id and action == RadialIdentifier.serverVehicleExit:
                    # Exit: move the rider back out into the vehicle's container.
                    target.transfer(owner, owner, target.container(), target.position)
                    target.toggleStateOff(ACTION.MOUNTED_CREATURE)
                elif action == RadialIdentifier.serverVehicleEnter:
                    # Enter: move the rider into the vehicle.
                    # NOTE(review): this branch also fires when the rider is
                    # already mounted and selects "enter" — confirm intended.
                    owner.container().transfer(owner, owner, target, vector3(0, 0, 0))
                    target.toggleStateOn(ACTION.MOUNTED_CREATURE)
            elif action == RadialIdentifier.vehicleStore:
                sim = self.getKernel().serviceManager().simulationService()
                # The pet control device (PCD) id is stored as an attribute.
                pcd = sim.findObjectById(target.getIntAttribute("pcd_id"))
                if pcd:
                    # Dismount first if the owner is currently riding, then
                    # move the vehicle into its PCD.
                    if owner.container().id == target.id:
                        target.transfer(owner, owner, target.container(), target.position)
                    target.container().transfer(owner, target, pcd, vector3(0, 0, 0))
|
from django.contrib.auth import authenticate
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import permissions
from . import app_settings
def is_authenticated(user):
    """Return whether *user* is present and authenticated."""
    if user is None:
        return False
    return user.is_authenticated
def is_django_staff(user):
    """Return truthy when *user* is a Django staff member or superuser.

    Mirrors the original short-circuit: a falsy *user* is returned as-is.
    """
    if not user:
        return user
    return user.is_staff or user.is_superuser
def is_staff(user):
    """Return True when *user* is Django staff or belongs to one of the
    configured SSO staff groups."""
    if user is None:
        return False
    if is_django_staff(user):
        return True
    profile = getattr(user, 'sso_app_profile', None)
    if profile is None:
        return False
    staff_groups = profile.groups.filter(name__in=app_settings.STAFF_USER_GROUPS)
    return staff_groups.count() > 0
def try_authenticate(username, email, password):
    """Authenticate with a username or an email address.

    Raises:
        ObjectDoesNotExist: if authentication fails or no credential is given.

    NOTE(review): when exactly one of *username*/*email* is provided,
    ``credentials`` is overwritten with the literal string ``'email'`` or
    ``'username'``, so ``authenticate`` receives that literal rather than the
    actual value — this looks like a bug (the initial
    ``username or email`` value was presumably intended) unless a custom
    auth backend interprets these literals; confirm before changing.
    """
    credentials = username or email or None
    user = None
    if username is None and email is not None:
        credentials = 'email'
    if username is not None and email is None:
        credentials = 'username'
    if credentials is not None:
        user = authenticate(username=credentials, password=password)
    if user is not None:
        return user
    raise ObjectDoesNotExist('Check credentials.')
class OwnerPermission(permissions.IsAuthenticated):
    """Object-level permission: the requesting user must BE the object."""
    message = 'You must be the owner.'
    def has_object_permission(self, request, view, obj):
        user = getattr(request, 'user', None)
        if user is None:
            return False
        return bool(user == obj)
class StaffPermission(permissions.IsAuthenticated):
    """Allow access only to staff members (Django staff or SSO staff group)."""
    message = 'You must be a staff member.'
    def has_permission(self, request, view):
        return bool(is_staff(request.user))
class NotDjangoStaffUserPermission(permissions.IsAuthenticated):
    """Deny access to Django staff/superuser accounts."""
    message = 'Django staff users are disabled.'
    def has_permission(self, request, view):
        return not is_django_staff(request.user)
class OwnerOrStaffPermission(permissions.IsAuthenticated):
    """Grant object access to staff members or to the object's owner."""
    message = 'You must be the owner or a staff member.'
    def has_object_permission(self, request, view, obj):
        user = request.user
        if not user:
            return False
        return bool(is_staff(user) or user == getattr(obj, 'user', None))
class PublicObjectOrOwnerOrStaffPermission(permissions.IsAuthenticated):
    """Grant object access to staff, to the owner, or for owned public objects.

    NOTE(review): a *public* object with no owner is denied to non-staff
    users — that reads counter to the class name; confirm it is intentional.
    """
    # Added for consistency with the other permission classes in this module.
    message = 'You must be the owner or a staff member, or the object must be public.'
    def has_object_permission(self, request, view, obj):
        user = request.user
        if is_staff(user):
            return True
        is_public = getattr(obj, 'is_public', False)
        has_owner = getattr(obj, 'user', False)
        # (Removed a dead `and not user_is_staff` clause: staff users have
        # already returned True above.)
        if is_public and not has_owner:
            return False
        if is_public:
            return True
        return getattr(obj, 'user', None) == user
|
# Generated by Django 3.1.13 on 2021-09-17 07:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add appearance/disappearance timestamps to PokemonEntity."""
    dependencies = [
        ('pokemon_entities', '0004_pokemonentity_pokemon'),
    ]
    operations = [
        # default=None with preserve_default=False is the makemigrations
        # pattern for a one-off default used only to populate existing rows;
        # None works here presumably because the table was empty — confirm
        # before running against populated data.
        migrations.AddField(
            model_name='pokemonentity',
            name='appeared_at',
            field=models.DateTimeField(default=None),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='pokemonentity',
            name='disappeared_at',
            field=models.DateTimeField(default=None),
            preserve_default=False,
        ),
    ]
|
import numpy as np
def split_train_test_indices(num_docs, prop_to_split=0.5, seed=42):
    """Randomly partition ``range(num_docs)`` into (train, test) index arrays.

    The first ``int(num_docs * prop_to_split)`` shuffled indices become the
    training set; the remainder is the test set.  Seeding the legacy global
    RNG keeps the split reproducible.
    """
    np.random.seed(seed)
    shuffled = np.arange(num_docs)
    np.random.shuffle(shuffled)
    cut = int(num_docs * prop_to_split)
    return shuffled[:cut], shuffled[cut:]
def cross_val_splits(num_docs, num_splits=10, seed=42):
    """Shuffle ``range(num_docs)`` and cut it into *num_splits* folds.

    The first ``num_splits - 1`` folds have ``num_docs // num_splits``
    elements; the last fold absorbs the remainder.
    """
    np.random.seed(seed)
    order = np.arange(num_docs)
    np.random.shuffle(order)
    size = num_docs // num_splits
    folds = []
    for k in range(num_splits - 1):
        folds.append(order[k * size:(k + 1) * size])
    folds.append(order[(num_splits - 1) * size:])
    return folds
def get_cv_split_assignments(num_docs, num_splits=10, seed=42):
    """Return an array of length *num_docs* giving each document's fold id.

    Uses the same seeded shuffle as ``cross_val_splits``, so the fold
    membership matches that function for identical arguments.
    """
    np.random.seed(seed)
    order = np.arange(num_docs)
    np.random.shuffle(order)
    size = num_docs // num_splits
    assignments = np.zeros(num_docs)
    for fold in range(num_splits):
        if fold < num_splits - 1:
            members = order[fold * size:(fold + 1) * size]
        else:
            # Last fold takes the remainder.
            members = order[fold * size:]
        assignments[members] = fold
    return assignments
|
import sys
import glob
KATAKANA = ('。「」、・ヲァィゥェォャョラッー'
'アイウエオカキクケコサシスセソタチツテトナニヌネノ'
'ハヒフヘホマミムメモヤユヨラリルレロワヲン')
args = sys.argv # argv[1]: the temporary folder that text2image wrote into
# Halfwidth kana -> fullwidth kana; fullwidth alphanumerics/symbols -> halfwidth.
# Linguistically odd (semi-)voiced-mark sequences are not strictly handled
# (the result depends on the preceding character).
# Deleting entries from new_lines shifts per-character box coordinates, but the
# LSTM training step aggregates coordinates per line, so there is no net effect.
def normalize_text(lines):
    """Normalize the leading glyph of each Tesseract box-file line.

    Fullwidth alphanumerics/symbols become halfwidth, halfwidth katakana
    become fullwidth, and halfwidth (semi-)voiced marks are merged into the
    preceding kana where possible.  Only the first character of each line is
    rewritten; the coordinate tail (``lines[i][1:]``) is preserved.
    """
    new_lines = []
    for i in range(len(lines)):
        code = ord(lines[i][0])
        if ord('!') <= code <= ord('}') and code != ord('\'): # fullwidth alphanumerics/symbols -> halfwidth
            new_lines.append(chr(code - 0xfee0) + lines[i][1:])
        elif code == ord('゙'): # halfwidth voiced-sound mark (dakuten)
            if len(new_lines) > 0:
                code_prev = ord(new_lines[-1][0])
                if code_prev == ord('ウ'):
                    # Special case: ウ + dakuten -> ヴ (replace previous entry).
                    del new_lines[-1]
                    new_lines.append('ヴ' + lines[i][1:])
                elif ord('か') <= code_prev <= ord('ホ'):
                    # Voiced kana sit one code point above their base form.
                    # NOTE(review): the range bounds mix hiragana and katakana
                    # — presumably intentional to cover both scripts; confirm.
                    del new_lines[-1]
                    new_lines.append(chr(code_prev + 1) + lines[i][1:])
                else:
                    # No base kana to merge with: keep a standalone mark.
                    new_lines.append('゛' + lines[i][1:])
            else:
                new_lines.append('゛' + lines[i][1:])
        elif code == ord('゚'): # halfwidth semi-voiced mark (handakuten)
            if len(new_lines) > 0:
                code_prev = ord(new_lines[-1][0])
                if ord('は') <= code_prev <= ord('ホ'):
                    # Semi-voiced kana sit two code points above the base form.
                    del new_lines[-1]
                    new_lines.append(chr(code_prev + 2) + lines[i][1:])
                else:
                    new_lines.append('゜' + lines[i][1:])
            else:
                new_lines.append('゜' + lines[i][1:])
        elif ord('。') <= code <= ord('ン'):
            # Halfwidth katakana/punctuation -> fullwidth via lookup table.
            new_lines.append(KATAKANA[code - 0xff61] + lines[i][1:])
        elif code == ord('\\') or code == ord('¥'):
            # Backslash / yen-sign variants -> fullwidth yen.
            new_lines.append('¥' + lines[i][1:])
        else:
            new_lines.append(lines[i])
    return new_lines
def main():
    """Normalize every .box file in the folder given as argv[1], in place."""
    for box_path in glob.glob(args[1] + '/*.box'):
        with open(box_path, 'r+', encoding='utf-8') as box_file:
            contents = box_file.readlines()
            box_file.seek(0)  # rewind so the normalized text overwrites the old
            box_file.write(''.join(normalize_text(contents)))
            box_file.truncate()  # trim anything left past the new end
if __name__ == '__main__':
main()
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import os
import requests
from .request_params import request_column, request_settings
from .utils import get_data_in_format, get_input_range
def get_new_port_number(experiment_name: str) -> dict:
    """Get the number of real-time episodes recorded for an experiment.

    Args:
        experiment_name (str): Name of the experiment.

    Returns:
        dict: Decoded JSON response containing the episode count.
    """
    # NOTE(review): experiment_name is interpolated directly into the query
    # string — safe only if it is never user-controlled; confirm upstream.
    params = {
        "query": f"select count(episode) from {experiment_name}.port_details",
        "count": "true"
    }
    episode_number_data = requests.get(
        url=request_settings.request_url.value,
        headers=request_settings.request_header.value,
        params=params
    ).json()
    return episode_number_data
def get_port_data(experiment_name: str, episode: str, tick: str) -> str:
    """Get the port data within one tick.

    Args:
        experiment_name (str): Name of the experiment expected to be displayed.
        episode (str): Number of the episode of expected data.
        tick (str): Number of tick of expected data.

    Returns:
        str: JSON string (records orient) of formatted port values for the
        requested tick — see process_port_data.
    """
    # NOTE(review): all three arguments are interpolated into the query
    # string; confirm they are never user-controlled.
    params = {
        "query": f"select {request_column.port_header.value} from {experiment_name}.port_details"
        f" where episode='{episode}' and tick='{tick}'",
        "count": "true"
    }
    db_port_data = requests.get(
        url=request_settings.request_url.value,
        headers=request_settings.request_header.value,
        params=params
    ).json()
    return process_port_data(db_port_data)
def get_acc_port_data(experiment_name: str, episode: str, start_tick: str, end_tick: str) -> str:
    """Get the port data within a tick range.

    Args:
        experiment_name (str): Name of the experiment expected to be displayed.
        episode (str): Number of the episode of expected data.
        start_tick (str): Number of tick to the start point of port data.
        end_tick (str): Number of tick to the end point of port data.

    Returns:
        str: JSON string of formatted port values over the selected range.
    """
    input_range = get_input_range(start_tick, end_tick)
    query = f"select {request_column.port_header.value} from {experiment_name}.port_details"\
        f" where episode='{episode}'"
    # An empty range ("()") means no tick filter — select the whole episode.
    if input_range != "()":
        query += f" and tick in {input_range}"
    params = {
        "query": query,
        "count": "true"
    }
    db_port_data = requests.get(
        url=request_settings.request_url.value,
        headers=request_settings.request_header.value,
        params=params
    ).json()
    return process_port_data(db_port_data)
def process_port_data(db_port_data: dict) -> str:
    """Join raw port rows with static topology metadata and derive a status.

    Args:
        db_port_data (dict): Original port query response.

    Returns:
        str: JSON array (records orient) of enriched port rows.
    """
    # Resolve the nginx static directory four levels above this file.
    exec_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))))
    config_file_path = f"{exec_path}/nginx/static/"
    # NOTE(review): port_list is loaded but never used below — dead code?
    with open(f"{config_file_path}port_list.json", "r", encoding="utf8")as port_list_file:
        port_list = json.load(port_list_file)
        port_list = port_list[0]["port_list"]
    with open(f"{config_file_path}port.json", "r", encoding="utf8")as port_file:
        port_json_data = json.load(port_file)
    original_port_data = get_data_in_format(db_port_data)
    # Attach display name and map position from the static port metadata,
    # keyed by the row's "index" column.
    original_port_data["port_name"] = list(
        map(
            lambda x: port_json_data[int(x)]['tooltip'],
            original_port_data["index"]
        )
    )
    original_port_data["position"] = list(
        map(
            lambda x: port_json_data[int(x)]['position'],
            original_port_data["index"]
        )
    )
    # Classify each port by empty containers vs. bookings.  The `* 5`
    # weighting and the +/-50 thresholds are presumably tuned display
    # heuristics — confirm before reuse.
    original_port_data["status"] = list(
        map(
            lambda x, y: 'surplus' if (x - y * 5 > 50) else ('demand' if (x - y * 5 < -50) else 'balance'),
            original_port_data['empty'], original_port_data['booking']
        )
    )
    port_data = original_port_data.to_json(orient='records')
    return port_data
|
"""
列表基础操作list
遍历
"""
list_name = ["郭世鑫", "涛涛", "罗耀泽"]
# 1. 从头到尾读取
for item in list_name:
print(item)
# 2. 非从头到尾读取
# 需求:将姓名是2个字的人名改为空字符串
# -- 修改
for i in range(len(list_name)):
if len(list_name[i]) == 2:
list_name[i] = ""
# 需求:非从尾到头读取(一行一个)
# 因为切片会创建新(拷贝)列表,浪费内存
# for item in list_name[::-1]:# 2 1 0
# print(item)
# 开始:len(列表名)-1 最后一个索引
# 结束:-1 因为range不包含结束只,所以实际取到的是0
# 间隔:-1 倒序
for i in range(len(list_name) - 1, -1, -1): # 2 1 0
print(list_name[i])
# 开始:len(列表名)-1 最后一个元素
# 结束:-1 定位最后一个元素
# for item in list_name[len(list_name) - 1:-1:-1]:# 2 1 0
# print(item)
|
# The MIT License (MIT)
#
# Copyright (c) 2018 Carter Nelson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# MDR0 configuration data - the configuration byte is formed with
# single segments taken from each group and ORing all together.
COUNTER_BITS = (32, 24, 16, 8)
QUADRATURE_MODES = (0, 1, 2, 4)
# Count modes
NQUAD = 0x00 # non-quadrature mode
QUADRX1 = 0x01 # X1 quadrature mode
QUADRX2 = 0x02 # X2 quadrature mode
QUADRX4 = 0x03 # X4 quadrature mode
# Running modes
FREE_RUN = 0x00
SINGE_CYCLE = 0x04
RANGE_LIMIT = 0x08
MODULO_N = 0x0C
# Index modes
DISABLE_INDX = 0x00 # index_disabled
INDX_LOADC = 0x10 # index_load_CNTR
INDX_RESETC = 0x20 # index_rest_CNTR
INDX_LOADO = 0x30 # index_load_OL
ASYNCH_INDX = 0x00 # asynchronous index
SYNCH_INDX = 0x80 # synchronous index
# Clock filter modes
FILTER_1 = 0x00 # filter clock frequncy division factor 1
FILTER_2 = 0x80 # filter clock frequncy division factor 2
# MDR1 configuration data; any of these
# data segments can be ORed together
# Flag modes
NO_FLAGS = 0x00 # all flags disabled
IDX_FLAG = 0x10 # IDX flag
CMP_FLAG = 0x20 # CMP flag
BW_FLAG = 0x40 # BW flag
CY_FLAG = 0x80 # CY flag
# 1 to 4 bytes data-width
BYTE_4 = 0x00 # four byte mode
BYTE_3 = 0x01 # three byte mode
BYTE_2 = 0x02 # two byte mode
BYTE_1 = 0x03 # one byte mode
# Enable/disable counter
EN_CNTR = 0x00 # counting enabled
DIS_CNTR = 0x04 # counting disabled
# LS7366R op-code list
CLR_MDR0 = 0x08
CLR_MDR1 = 0x10
CLR_CNTR = 0x20
CLR_STR = 0x30
READ_MDR0 = 0x48
READ_MDR1 = 0x50
READ_CNTR = 0x60
READ_OTR = 0x68
READ_STR = 0x70
WRITE_MDR1 = 0x90
WRITE_MDR0 = 0x88
WRITE_DTR = 0x98
LOAD_CNTR = 0xE0
LOAD_OTR = 0xE4
class LS7366R():
"""LSI/CSI LS7366R quadrature counter."""
def __init__(self, spi):
    """Initialize the counter over an already-opened SPI bus.

    Args:
        spi: SpiDev-compatible object providing writebytes() and xfer2().
    """
    # This should be a SpiDev or compatible object.
    self._spi = spi
    # Default config: X4 quadrature, free-running count, index input
    # disabled, filter clock division factor 1.
    self._write_mdr0(QUADRX4 | FREE_RUN | DISABLE_INDX | FILTER_1)
    # 4-byte (32-bit) counter width, counting enabled.
    self._write_mdr1(BYTE_4 | EN_CNTR)
    # Set to zero at start
    self.counts = 0
@property
def counts(self):
    """Current counts as signed integer."""
    return self._get_counts()
@counts.setter
def counts(self, value):
    # Stage *value* in the data register, then latch it into the counter.
    self._set_counts(value)
@property
def bits(self):
"""Counter bits."""
return COUNTER_BITS[self._read_mdr1()[0] & 0x03]
@bits.setter
def bits(self, value):
if value not in COUNTER_BITS:
raise ValueError("Bits must be one of ", *COUNTER_BITS)
self._write_mdr1(self._read_mdr1()[0] &0xFC | COUNTER_BITS.index(value))
@property
def quadrature(self):
"""Quadrature mode."""
return QUADRATURE_MODES[self._read_mdr0()[0] & 0x03]
@quadrature.setter
def quadrature(self, value):
if value not in QUADRATURE_MODES:
raise ValueError("Mode must be one of ", *QUADRATURE_MODES)
self._write_mdr0((self._read_mdr0()[0] & 0xFC) | QUADRATURE_MODES.index(value))
def _get_counts(self, ):
"""Read the counter register value."""
bits = self.bits
byte_values = self._read_cntr()
counts = 0
for b in byte_values:
counts <<= 8
counts |= b
if counts >> (bits - 1):
counts -= 1 << bits
return counts
def _set_counts(self, value):
"""Set the counter register value."""
self._write_dtr(value)
self._load_cntr()
def _clear_mdr0(self):
"""Clear MDR0."""
self._spi.writebytes([CLR_MDR0])
def _clear_mdr1(self):
"""Clear MDR1."""
self._spi.writebytes([CLR_MDR1])
def _clear_cntr(self):
"""Clear the counter."""
self._spi.writebytes([CLR_CNTR])
def _clear_str(self):
"""Clear the status register."""
self._spi.writebytes([CLR_STR])
def _read_mdr0(self):
"""Read the 8 bit MDR0 register."""
return self._spi.xfer2([READ_MDR0, 0x00])[1:]
def _read_mdr1(self):
"""Read the 8 bit MDR1 register."""
return self._spi.xfer2([READ_MDR1, 0x00])[1:]
def _read_cntr(self):
"""Transfer CNTR to OTR, then read OTR. Size of return depends
on current bit setting."""
return self._spi.xfer2([READ_CNTR]+[0]*(self.bits//8))[1:]
def _read_otr(self):
"""Output OTR."""
return self._spi.xfer2([READ_OTR]+[0]*(self.bits//8))[1:]
def _read_str(self):
"""Read 8 bit STR register."""
return self._spi.xfer2([READ_STR,0x00])[1:]
def _write_mdr0(self, mode):
"""Write serial data at MOSI into MDR0."""
self._spi.writebytes([WRITE_MDR0, mode])
def _write_mdr1(self, mode):
"""Write serial data at MOSI into MDR1."""
self._spi.writebytes([WRITE_MDR1, mode])
def _write_dtr(self, value):
"""Write to 32 bit DTR register."""
self._spi.writebytes([WRITE_DTR, value >> 24 & 0xFF,
value >> 16 & 0xFF,
value >> 8 & 0xFF,
value & 0xFF])
def _load_cntr(self):
"""Transfer DTR to CNTR."""
self._spi.writebytes([LOAD_CNTR])
def _load_otr(self):
"""Transfer CNTR to OTR."""
self._spi.writebytes([LOAD_OTR])
|
class Solution:
    def countLargestGroup(self, n: int) -> int:
        """Group 1..n by digit sum and return how many groups share the
        largest size (LeetCode 1399).

        :param n: upper bound of the range, inclusive.
        :return: number of groups whose size equals the maximum group size.
        """
        # Only group sizes matter, so count instead of storing members.
        # (Also avoids the undeclared `collections` dependency and the
        # comprehension variable that shadowed the parameter `n`.)
        group_sizes = {}
        for i in range(1, n + 1):
            digit_sum = sum(int(ch) for ch in str(i))
            group_sizes[digit_sum] = group_sizes.get(digit_sum, 0) + 1
        largest = max(group_sizes.values())
        return sum(1 for size in group_sizes.values() if size == largest)
|
from . import export_d3po
from . import export_plotly
|
import time
import pygatt # type: ignore
from .constants import CHAR_TX, CHAR_FEEDBACK, PyHatchBabyRestSound
class PyHatchBabyRest(object):
    """ A synchronous interface to a Hatch Baby Rest device using pygatt. """
    def __init__(self, addr: str = None, adapter: pygatt.GATTToolBackend = None):
        """ Instantiate the interface.
        :param addr: A specific address to connect to; scans for a device
            named "Hatch Rest" when omitted.
        :param adapter: An already instantiated `pygatt.GATTToolBackend`;
            a new backend is created and started when omitted.
        :raises RuntimeError: if no address was given and no device is found.
        """
        if adapter is None:
            # No adapter supplied: create and start our own backend.
            self.adapter = pygatt.GATTToolBackend()
            self.adapter.start()
        else:
            self.adapter = adapter
        if addr is None:
            # No address supplied: take the first scanned "Hatch Rest" device.
            devices = self.adapter.scan()
            for device in devices:
                if device["name"] == "Hatch Rest":
                    addr = device["address"]
                    break
            else:
                # for/else: the scan returned no matching device.
                raise RuntimeError(
                    "No address provided and could not find device via scan."
                )
        self.device = self.adapter.connect(
            addr, address_type=pygatt.BLEAddressType.random
        )
        # Populate color/brightness/sound/volume/power from the device.
        self._refresh_data()
    def _send_command(self, command: str):
        """ Send a command to the device, then re-read its state.
        :param command: The command to send.
        """
        self.device.char_write(CHAR_TX, bytearray(command, "utf-8"))
        # Give the device a moment to apply the command before re-reading.
        time.sleep(0.25)
        self._refresh_data()
    def _refresh_data(self) -> None:
        """ Request updated data from the device and set the local attributes. """
        response = [hex(x) for x in self.device.char_read(CHAR_FEEDBACK)]
        # Make sure the data is where we think it is
        assert response[5] == "0x43" # color
        assert response[10] == "0x53" # audio
        assert response[13] == "0x50" # power
        red, green, blue, brightness = [int(x, 16) for x in response[6:10]]
        sound = PyHatchBabyRestSound(int(response[11], 16))
        volume = int(response[12], 16)
        # Device reports "on" when the top two bits of the power byte are clear.
        # NOTE(review): bit meaning inferred from this mask only — confirm
        # against the device protocol.
        power = not bool(int("11000000", 2) & int(response[14], 16))
        self.color = (red, green, blue)
        self.brightness = brightness
        self.sound = sound
        self.volume = volume
        self.power = power
    def disconnect(self):
        """ Disconnect from the device. """
        return self.device.disconnect()
    def power_on(self):
        """ Turn the device on. """
        command = "SI{:02x}".format(1)
        self._send_command(command)
    def power_off(self):
        """ Turn the device off. """
        command = "SI{:02x}".format(0)
        self._send_command(command)
    def set_sound(self, sound):
        """ Select the sound to play (a PyHatchBabyRestSound value). """
        command = "SN{:02x}".format(sound)
        self._send_command(command)
    def set_volume(self, volume):
        """ Set the playback volume (0-255). """
        command = "SV{:02x}".format(volume)
        self._send_command(command)
    def set_color(self, red: int, green: int, blue: int):
        """ Set the lamp color, keeping the current brightness. """
        # Refresh first so self.brightness is current before reuse.
        self._refresh_data()
        command = "SC{:02x}{:02x}{:02x}{:02x}".format(red, green, blue, self.brightness)
        self._send_command(command)
    def set_brightness(self, brightness: int):
        """ Set the lamp brightness, keeping the current color. """
        # Refresh first so self.color is current before reuse.
        self._refresh_data()
        command = "SC{:02x}{:02x}{:02x}{:02x}".format(
            self.color[0], self.color[1], self.color[2], brightness
        )
        self._send_command(command)
    @property
    def connected(self):
        # Reaches into pygatt's private attribute; no public accessor exists.
        return self.device._connected
|
# flake8: noqa
from tune_hyperopt.optimizer import HyperoptLocalOptimizer
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
# Target barbican endpoint and default request parameters for the smoke test.
host = "localhost"
port = 9311
method = "GET"
timeout = 1000  # NOTE(review): httplib timeouts are in seconds — 1000 s looks high; confirm intent
body = None
path = "/"
headers = ""
# Response expected from the version endpoint of a healthy barbican build.
expected_response = {"v1": "current", "build": "0.1.34dev"}
# Typically an authenticated user session will make a request for a key to
# barbican
# The RESTful request will, in all likelihood, contain an auth token
# this test mimics such a request provided a token
# if pki tokens are used, the token is rather large
# uuid tokens are smaller and easier to test with
# assume there is a "demo" user with only member role
# curl -XPOST -d '{"auth":{"passwordCredentials":{"username": "demo", \
# "password": "secret"}, "tenantName": "demo"}}' \
# -H "Content-type: application/json" http://localhost:35357/v2.0/tokens
#
# pull out the token_id from above and use in ping_barbican
#
#TODO(malini) flesh this out
def get_demo_token(password):
    """Obtain a keystone token for the "demo" user (unimplemented stub).

    See the curl example in the comments above for the equivalent manual
    request against keystone.

    :param password: password of the "demo" user.
    """
    pass
def ping_barbican(token_id):
    """Send an authenticated request to barbican and return the raw body.

    Uses the module-level ``host``, ``port``, ``method``, ``path`` and
    ``timeout`` settings.

    :param token_id: keystone auth token passed in the request headers.
    :return: raw response body as returned by ``HTTPResponse.read()``.
    """
    headers = {'X_AUTH_TOKEN': token_id, 'X_IDENTITY_STATUS': 'Confirmed'}
    connection = httplib.HTTPConnection(host, port, timeout=timeout)
    try:
        connection.request(method, path, None, headers)
        response = connection.getresponse().read()
    finally:
        # Always release the socket, even if the request or read raises.
        connection.close()
    return response
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-31 06:32
from __future__ import unicode_literals
import cloudinary.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the character app (Django 1.9).

    NOTE(review): several integer fields pass ``blank=0`` where Django
    expects a boolean; 0 is falsy so this behaves as ``blank=False``.
    Left untouched — applied migrations must stay byte-stable.
    """
    initial = True
    # Requires the objects app's 0005 migration and the swappable user model.
    dependencies = [
        ('objects', '0005_auto_20150403_2339'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    # CreateModel operations first, then AddField operations for the
    # relations that reference models created later in the list.
    operations = [
        migrations.CreateModel(
            name='AccountHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('xp_earned', models.SmallIntegerField(blank=0, default=0)),
                ('gm_notes', models.TextField(blank=True, null=True)),
                ('start_date', models.DateTimeField(blank=True, null=True)),
                ('end_date', models.DateTimeField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Chapter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=255, null=True)),
                ('synopsis', models.TextField(blank=True, null=True)),
                ('start_date', models.DateTimeField(blank=True, null=True)),
                ('end_date', models.DateTimeField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Clue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=255)),
                ('rating', models.PositiveSmallIntegerField(blank=0, default=0, help_text=b'Value required to get this clue')),
                ('desc', models.TextField(blank=True, help_text=b'Description of the clue given to the player', verbose_name=b'Description')),
                ('red_herring', models.BooleanField(default=False, help_text=b'Whether this revelation is totally fake')),
                ('allow_investigation', models.BooleanField(default=False, help_text=b'Can be gained through investigation rolls')),
                ('allow_exploration', models.BooleanField(default=False, help_text=b'Can be gained through exploration rolls')),
                ('allow_trauma', models.BooleanField(default=False, help_text=b'Can be gained through combat rolls')),
                ('investigation_tags', models.TextField(blank=True, help_text=b'List keywords separated by semicolons for investigation', verbose_name=b'Keywords for investigation')),
            ],
        ),
        migrations.CreateModel(
            name='ClueDiscovery',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.TextField(blank=True, help_text=b"Message for the player's records about how they discovered this.")),
                ('date', models.DateTimeField(blank=True, null=True)),
                ('discovery_method', models.CharField(help_text=b'How this was discovered - exploration, trauma, etc', max_length=255)),
                ('roll', models.PositiveSmallIntegerField(blank=0, default=0)),
            ],
        ),
        migrations.CreateModel(
            name='ClueForRevelation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('required_for_revelation', models.BooleanField(default=True, help_text=b'Whether this must be discovered for the revelation to finish')),
                ('tier', models.PositiveSmallIntegerField(blank=0, default=0, help_text=b'How high in the hierarchy of discoveries this clue is, lower number discovered first')),
                ('clue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='usage', to='character.Clue')),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(blank=True, null=True)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('gamedate', models.CharField(blank=True, max_length=80, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Episode',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=255, null=True)),
                ('synopsis', models.TextField(blank=True, null=True)),
                ('gm_notes', models.TextField(blank=True, null=True)),
                ('date', models.DateTimeField(blank=True, null=True)),
                ('chapter', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='episodes', to='character.Chapter')),
            ],
        ),
        migrations.CreateModel(
            name='Investigation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ongoing', models.BooleanField(default=True, help_text=b'Whether this investigation is finished or not')),
                ('active', models.BooleanField(default=False, help_text=b'Whether this is the investigation for the week. Only one allowed')),
                ('automate_result', models.BooleanField(default=True, help_text=b"Whether to generate a result during weekly maintenance. Set false if GM'd")),
                ('results', models.TextField(blank=True, default=b"You didn't find anything.", help_text=b'The text to send the player, either set by GM or generated automatically by script if automate_result is set.')),
                ('actions', models.TextField(blank=True, help_text=b'The writeup the player submits of their actions, used for GMing.')),
                ('topic', models.CharField(blank=True, help_text=b'Keyword to try to search for clues against', max_length=255)),
                ('stat_used', models.CharField(blank=True, default=b'perception', help_text=b'The stat the player chose to use', max_length=80)),
                ('skill_used', models.CharField(blank=True, default=b'investigation', help_text=b'The skill the player chose to use', max_length=80)),
                ('silver', models.PositiveSmallIntegerField(blank=0, default=0, help_text=b'Additional silver added by the player')),
                ('economic', models.PositiveSmallIntegerField(blank=0, default=0, help_text=b'Additional economic resources added by the player')),
                ('military', models.PositiveSmallIntegerField(blank=0, default=0, help_text=b'Additional military resources added by the player')),
                ('social', models.PositiveSmallIntegerField(blank=0, default=0, help_text=b'Additional social resources added by the player')),
            ],
        ),
        migrations.CreateModel(
            name='Milestone',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=255, null=True)),
                ('synopsis', models.TextField(blank=True, null=True)),
                ('secret', models.BooleanField(default=False)),
                ('gm_notes', models.TextField(blank=True, null=True)),
                ('importance', models.PositiveSmallIntegerField(blank=0, default=0)),
                ('chapter', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='milestones', to='character.Chapter')),
                ('episode', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='milestones', to='character.Episode')),
            ],
        ),
        migrations.CreateModel(
            name='Mystery',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('desc', models.TextField(blank=True, help_text=b'Description of the mystery given to the player when fully revealed', verbose_name=b'Description')),
                ('category', models.CharField(blank=True, help_text=b'Type of mystery this is - ability-related, metaplot, etc', max_length=80)),
            ],
            options={
                'verbose_name_plural': 'Mysteries',
            },
        ),
        migrations.CreateModel(
            name='MysteryDiscovery',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.TextField(blank=True, help_text=b"Message for the player's records about how they discovered this.")),
                ('date', models.DateTimeField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Participant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('xp_earned', models.PositiveSmallIntegerField(blank=0, default=0)),
                ('karma_earned', models.PositiveSmallIntegerField(blank=0, default=0)),
                ('gm_notes', models.TextField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(auto_now_add=True)),
                ('title', models.CharField(blank=True, max_length=200, verbose_name=b'Name or description of the picture (optional)')),
                ('alt_text', models.CharField(blank=True, max_length=200, verbose_name=b"Optional 'alt' text when mousing over your image")),
                ('image', cloudinary.models.CloudinaryField(max_length=255, verbose_name=b'image')),
                ('owner', models.ForeignKey(blank=True, help_text=b'a Character owner of this image, if any.', null=True, on_delete=django.db.models.deletion.CASCADE, to='objects.ObjectDB', verbose_name=b'owner')),
            ],
        ),
        migrations.CreateModel(
            name='PlayerAccount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('karma', models.PositiveSmallIntegerField(blank=0, default=0)),
                ('gm_notes', models.TextField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Revelation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=255)),
                ('desc', models.TextField(blank=True, help_text=b'Description of the revelation given to the player', verbose_name=b'Description')),
                ('required_clue_value', models.PositiveSmallIntegerField(blank=0, default=0, help_text=b'The total value of clues to trigger this')),
                ('red_herring', models.BooleanField(default=False, help_text=b'Whether this revelation is totally fake')),
            ],
        ),
        migrations.CreateModel(
            name='RevelationDiscovery',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.TextField(blank=True, help_text=b"Message for the player's records about how they discovered this.")),
                ('date', models.DateTimeField(blank=True, null=True)),
                ('discovery_method', models.CharField(help_text=b'How this was discovered - exploration, trauma, etc', max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='RevelationForMystery',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('required_for_mystery', models.BooleanField(default=True, help_text=b'Whether this must be discovered for the mystery to finish')),
                ('tier', models.PositiveSmallIntegerField(blank=0, default=0, help_text=b'How high in the hierarchy of discoveries this revelation is, lower number discovered first')),
                ('mystery', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='revelations_used', to='character.Mystery')),
                ('revelation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='usage', to='character.Revelation')),
            ],
        ),
        migrations.CreateModel(
            name='Roster',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=255, null=True)),
                ('lock_storage', models.TextField(blank=True, help_text=b'defined in setup_utils', verbose_name=b'locks')),
            ],
        ),
        migrations.CreateModel(
            name='RosterEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gm_notes', models.TextField(blank=True)),
                ('inactive', models.BooleanField(default=False)),
                ('frozen', models.BooleanField(default=False)),
                ('sheet_style', models.TextField(blank=True)),
                ('lock_storage', models.TextField(blank=True, help_text=b'defined in setup_utils', verbose_name=b'locks')),
                ('character', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='roster', to='objects.ObjectDB')),
                ('current_account', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='characters', to='character.PlayerAccount')),
                ('player', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='roster', to=settings.AUTH_USER_MODEL)),
                ('previous_accounts', models.ManyToManyField(blank=True, through='character.AccountHistory', to='character.PlayerAccount')),
                ('profile_picture', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='character.Photo')),
                ('roster', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='entries', to='character.Roster')),
            ],
            options={
                'verbose_name_plural': 'Roster Entries',
            },
        ),
        migrations.CreateModel(
            name='RPScene',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=80, verbose_name=b'title of the scene')),
                ('synopsis', models.TextField(verbose_name=b'Description of the scene written by player')),
                ('date', models.DateTimeField(blank=True, null=True)),
                ('log', models.TextField(verbose_name=b'Text log of the scene')),
                ('lock_storage', models.TextField(blank=True, help_text=b'defined in setup_utils', verbose_name=b'locks')),
                ('character', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='logs', to='character.RosterEntry')),
                ('milestone', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='log', to='character.Milestone')),
            ],
            options={
                'verbose_name_plural': 'RP Scenes',
            },
        ),
        migrations.CreateModel(
            name='Story',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=255, null=True)),
                ('synopsis', models.TextField(blank=True, null=True)),
                ('season', models.PositiveSmallIntegerField(blank=0, default=0)),
                ('start_date', models.DateTimeField(blank=True, null=True)),
                ('end_date', models.DateTimeField(blank=True, null=True)),
                ('current_chapter', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='current_chapter_story', to='character.Chapter')),
            ],
            options={
                'verbose_name_plural': 'Stories',
            },
        ),
        migrations.CreateModel(
            name='StoryEmit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(blank=True, null=True)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('chapter', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='emits', to='character.Chapter')),
                ('episode', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='emits', to='character.Episode')),
                ('sender', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='emits', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='revelationdiscovery',
            name='character',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='revelations', to='character.RosterEntry'),
        ),
        migrations.AddField(
            model_name='revelationdiscovery',
            name='investigation',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='revelations', to='character.Investigation'),
        ),
        migrations.AddField(
            model_name='revelationdiscovery',
            name='milestone',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='revelation', to='character.Milestone'),
        ),
        migrations.AddField(
            model_name='revelationdiscovery',
            name='revealed_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='revelations_spoiled', to='character.RosterEntry'),
        ),
        migrations.AddField(
            model_name='revelationdiscovery',
            name='revelation',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='discoveries', to='character.Revelation'),
        ),
        migrations.AddField(
            model_name='revelation',
            name='characters',
            field=models.ManyToManyField(blank=True, through='character.RevelationDiscovery', to='character.RosterEntry'),
        ),
        migrations.AddField(
            model_name='revelation',
            name='mysteries',
            field=models.ManyToManyField(through='character.RevelationForMystery', to='character.Mystery'),
        ),
        migrations.AddField(
            model_name='participant',
            name='character',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='character.RosterEntry'),
        ),
        migrations.AddField(
            model_name='participant',
            name='milestone',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='character.Milestone'),
        ),
        migrations.AddField(
            model_name='mysterydiscovery',
            name='character',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mysteries', to='character.RosterEntry'),
        ),
        migrations.AddField(
            model_name='mysterydiscovery',
            name='investigation',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='mysteries', to='character.Investigation'),
        ),
        migrations.AddField(
            model_name='mysterydiscovery',
            name='milestone',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='mystery', to='character.Milestone'),
        ),
        migrations.AddField(
            model_name='mysterydiscovery',
            name='mystery',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='discoveries', to='character.Mystery'),
        ),
        migrations.AddField(
            model_name='mystery',
            name='characters',
            field=models.ManyToManyField(blank=True, through='character.MysteryDiscovery', to='character.RosterEntry'),
        ),
        migrations.AddField(
            model_name='milestone',
            name='image',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='milestones', to='character.Photo'),
        ),
        migrations.AddField(
            model_name='milestone',
            name='participants',
            field=models.ManyToManyField(blank=True, through='character.Participant', to='character.RosterEntry'),
        ),
        migrations.AddField(
            model_name='milestone',
            name='protagonist',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='milestones', to='character.RosterEntry'),
        ),
        migrations.AddField(
            model_name='investigation',
            name='character',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='investigations', to='character.RosterEntry'),
        ),
        migrations.AddField(
            model_name='investigation',
            name='clue_target',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='character.Clue'),
        ),
        migrations.AddField(
            model_name='comment',
            name='milestone',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='character.Milestone'),
        ),
        migrations.AddField(
            model_name='comment',
            name='poster',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='character.RosterEntry'),
        ),
        migrations.AddField(
            model_name='comment',
            name='reply_to',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='character.Comment'),
        ),
        migrations.AddField(
            model_name='comment',
            name='target',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments_upon', to='character.RosterEntry'),
        ),
        migrations.AddField(
            model_name='clueforrevelation',
            name='revelation',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='clues_used', to='character.Revelation'),
        ),
        migrations.AddField(
            model_name='cluediscovery',
            name='character',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='clues', to='character.RosterEntry'),
        ),
        migrations.AddField(
            model_name='cluediscovery',
            name='clue',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='discoveries', to='character.Clue'),
        ),
        migrations.AddField(
            model_name='cluediscovery',
            name='investigation',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='clues', to='character.Investigation'),
        ),
        migrations.AddField(
            model_name='cluediscovery',
            name='milestone',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='clue', to='character.Milestone'),
        ),
        migrations.AddField(
            model_name='cluediscovery',
            name='revealed_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='clues_spoiled', to='character.RosterEntry'),
        ),
        migrations.AddField(
            model_name='clue',
            name='characters',
            field=models.ManyToManyField(blank=True, through='character.ClueDiscovery', to='character.RosterEntry'),
        ),
        migrations.AddField(
            model_name='clue',
            name='revelations',
            field=models.ManyToManyField(through='character.ClueForRevelation', to='character.Revelation'),
        ),
        migrations.AddField(
            model_name='chapter',
            name='story',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='previous_chapters', to='character.Story'),
        ),
        migrations.AddField(
            model_name='accounthistory',
            name='account',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='character.PlayerAccount'),
        ),
        migrations.AddField(
            model_name='accounthistory',
            name='entry',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='character.RosterEntry'),
        ),
    ]
|
from xml_collation.core_functions import collate_xml
from xml_collation.core_functions import collate_xml_example
from xml_collation.core_functions import collate_xml_svg
__all__ = ["collate_xml", "collate_xml_example", "collate_xml_svg"]
|
# -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
from pycmbs.data import Data
from pycmbs.diagnostic import PatternCorrelation
import matplotlib.pyplot as plt
import numpy as np
# Load the NCEP air temperature example dataset shipped with pycmbs.
file_name = '../../../pycmbs/examples/example_data/air.mon.mean.nc'
A = Data(file_name, 'air', lat_name='lat', lon_name='lon', read=True, label='air temperature')
# Build a perturbed copy of A: scale by 2.3 in place, then add uniform noise.
B = A.copy()
B.mulc(2.3, copy=False)
B.data = B.data + np.random.random(B.shape)*100.
# calculate spatial correlation for all timesteps ...
P = PatternCorrelation(A,B)
# ... and visualize it
P.plot()
plt.show()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
    """Serve the landing page for GET /."""
    def get(self):
        # Render the base template from the configured template_path.
        self.render("base.html")
class ErrorHandler(tornado.web.RequestHandler):
    """Serve a demo error page for GET /500."""
    def get(self):
        # Render the error template with a fixed apology message.
        self.render("500.html", msg="We're sorry, but something went wrong.")
def make_app():
    """Build the Tornado application with its routes and template settings."""
    routes = [
        (r"/", MainHandler),
        (r"/500", ErrorHandler),
    ]
    # Templates live next to this module in a "templates" directory.
    template_dir = os.path.join(os.path.dirname(__file__), "templates")
    return tornado.web.Application(
        routes,
        template_path=template_dir,
        debug=True,
    )
if __name__ == "__main__":
    # Start the development server on port 8888 and block on the IOLoop.
    app = make_app()
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()
|
#!/usr/bin/env python
import cvtestutils
import unittest
from cv import *
class moments_test(unittest.TestCase):
    """Exercise cvMoments on both a CvMat image and contour CvSeqs
    (legacy OpenCV 1.x Python API)."""
    def setUp(self):
        # create an 8-bit 100x100 test image, zeroed
        img = cvCreateMat(100,100,CV_8U);
        cvZero( img )
        # draw a filled white rectangle in the middle
        cvRectangle( img, cvPoint( 25, 25 ), cvPoint( 75, 75 ), CV_RGB(255,255,255), -1 );
        self.img = img
        # create the storage area used by cvFindContours
        self.storage = cvCreateMemStorage (0)
        # find the contours of the rectangle
        nb_contours, self.contours = cvFindContours (img,
            self.storage,
            sizeof_CvContour,
            CV_RETR_LIST,
            CV_CHAIN_APPROX_SIMPLE,
            cvPoint (0,0))
    def test_cvMoments_CvMat( self ):
        # cvMoments must accept a CvMat directly (binary flag = 1).
        m = CvMoments()
        cvMoments( self.img, m, 1 )
    def test_cvMoments_CvSeq( self ):
        m = CvMoments()
        # Now test with each contour CvSeq found in setUp
        for contour in self.contours.hrange():
            cvMoments( contour, m, 1 )
def suite():
    """Collect every moments_test case into a unittest suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(moments_test)
if __name__ == '__main__':
    # Run the moments tests with verbose output when executed directly.
    unittest.TextTestRunner(verbosity=2).run(suite())
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold
import statistics
from tqdm.notebook import tqdm
def uncertainty_rfr_qfr(df, X, Y, o, d_start=12, label_start=0, label_end=5,
                        true_y=True, max_depth=5, num_trees=1000,
                        min_samples_split=2, rs_split=130):
    '''
    Estimate per-point prediction uncertainty in the style of a quantile
    regression forest: each tree is expanded until every leaf holds a
    single observed response (min_samples_leaf=1), so the spread of the
    per-tree predictions yields a 95% confidence interval.

    Inputs
        - df: pd df. Training dataframe; must contain the descriptor
          columns (from d_start onward) and the label columns.
        - X: pd df. Points to predict uncertainties for (e.g. X_test or a
          brand-new dataset, possibly without Y).
        - Y: true values for the points in X; ignored when true_y is False.
        - o: int. Index (within the label columns) of the output to train on.
        - d_start: int. Column index where df's descriptors begin.
        - label_start / label_end: int. Column range of the label columns.
        - true_y: bool. True when Y holds real values for X.
        - max_depth / num_trees / min_samples_split: RFR hyperparameters.
        - rs_split: int. Random state for the train/test split so the
          bootstrapping is consistent across runs.
    Outputs
        - err_df: pd df indexed like X with columns: actual, err_down,
          err_up, predicted (mean), std_dev, err_interval (95% CI width).
    '''
    # ---- train the forest on the requested label ----
    descriptors = df.columns[d_start:]
    P = df[descriptors]
    s = df[df.columns[label_start:label_end]]
    # NOTE(review): stratification assumes the label columns include
    # 'Type' and 'Site' -- confirm for new datasets.
    X_train, X_test, y_train, y_test = \
        train_test_split(P, s, test_size=0.22, random_state=rs_split,
                         stratify=s[['Type', 'Site']])
    # min_samples_leaf=1 fully expands each tree (one response per leaf),
    # which is what makes the quantile trick valid.
    clf = RandomForestRegressor(n_estimators=num_trees, max_depth=max_depth,
                                min_samples_leaf=1,
                                min_samples_split=min_samples_split,
                                random_state=130, n_jobs=-1)
    clf.fit(X_train[descriptors], y_train[y_train.columns[o]])
    # ---- per-tree predictions on the never-before-seen points ----
    err_down = []
    err_up = []
    err_mean = []
    err_stddev = []
    X_arr = np.array(X)
    for x in tqdm(np.arange(len(X_arr))):
        preds = [tree.predict(X_arr[x].reshape(1, -1))[0]
                 for tree in clf.estimators_]
        err_down.append(np.quantile(preds, 0.025))
        err_up.append(np.quantile(preds, 0.975))
        err_mean.append(np.mean(preds))
        err_stddev.append(statistics.pstdev(preds))
    # Fixed: the original `if true_y is True / elif true_y is False` left
    # Y_arr undefined (NameError) for truthy non-bool values; a plain
    # else covers every case.
    if true_y:
        truth = np.array(Y)
    else:
        truth = np.zeros(X_arr.shape[0])
    d = {'index': list(X.index), 'actual': truth, 'err_down': err_down,
         'err_up': err_up, 'predicted': err_mean, 'std_dev': err_stddev}
    err_df = pd.DataFrame(data=d)
    err_df['err_interval'] = err_df['err_up'] - err_df['err_down']
    err_df.set_index('index', inplace=True)
    return err_df
def descriptors_outputs(df, d_start, o):
    '''
    Split the dataframe into a descriptors frame and an output series.

    Inputs
        - df: pandas df. ML training dataset containing targets and
          features.
        - d_start: int. Column index where the descriptor columns start;
          descriptors run from d_start to the last column.
        - o: int. Column index of the output.
    Outputs
        - X: pandas df. Descriptor columns.
        - y: pandas series. Output column.
    '''
    feature_cols = df.columns[d_start:]
    target_col = df.columns[o]
    return df[feature_cols], df[target_col]
def traintest(X, y, train_idx, test_idx):
    '''
    Select the training rows of X and y by positional index.

    ``test_idx`` is accepted for call-site symmetry with KFold splits but
    is not used here.

    Inputs
        - X: pandas df. Descriptors.
        - y: pandas df/series. Output.
        - train_idx: np array. Positional indexes of training points.
        - test_idx: np array. Positional indexes of testing points (unused).
    Outputs
        - X_train: descriptor rows of the training set.
        - y_train: output rows of the training set.
    '''
    rows = list(train_idx)
    return X.iloc[rows], y.iloc[rows]
def predict_append(clf, N_arr, n, preds):
    '''
    Predict the n-th row of N_arr with clf and append the value to preds.

    Inputs
        - clf: fitted regressor exposing .predict (e.g. sklearn RFR).
        - N_arr: np array. Descriptor rows.
        - n: int. Row of N_arr to predict on.
        - preds: list. Accumulator for prediction values.
    Outputs
        - preds: the same list with the new prediction appended.
    '''
    row = N_arr[n].reshape(1, -1)
    preds.append(clf.predict(row)[0])
    return preds
def dft_points(true_y, Y, N_arr):
    '''
    Build the "true value" column for err_df.

    Inputs
        - true_y: bool. If True, Y holds real target values; if False a
          zero placeholder array is produced.
        - Y: array-like of true values; ignored when true_y is False.
        - N_arr: np array. Descriptor rows, used only for its length.
    Outputs
        - Y_arr: np array. Either np.array(Y) or zeros of len(N_arr).
    '''
    if true_y:
        return np.array(Y)
    # Fixed: the original `if ... is True / elif ... is False` pair raised
    # NameError for truthy non-bool values of true_y; a plain else
    # covers every case.
    return np.zeros(N_arr.shape[0])
def uncert_table(N, X, type_col, ab_col, site_col, imp_col, Y_arr,
                 pred_df_desc):
    '''
    Build the summary table of per-point mean/std across k-folds, with
    type, AB, site, impurity and true value alongside.

    Inputs:
        - N: pd df. Descriptor frame (provides the output index).
        - X: pd df. Points being predicted, carrying the label columns.
        - type_col / ab_col / site_col / imp_col: int. Column indices in
          X of the 'Type', 'AB', 'Site' and 'Impurity/M' columns.
        - Y_arr: np array. True values (or zeros) for the 'true val' column.
        - pred_df_desc: pd df. Result of .describe() on the fold
          predictions; supplies the 'mean' and 'std' rows.
    Outputs:
        - err_df: pd df indexed like N with label, true value, mean and
          std columns.
    '''
    stats = pred_df_desc.T
    cols = X.columns
    table = {
        'index': list(N.index),
        'Type': list(X[cols[type_col]]),
        'AB': list(X[cols[ab_col]]),
        'Site': list(X[cols[site_col]]),
        'Impurity': list(X[cols[imp_col]]),
        'true val': Y_arr,
        'mean': stats['mean'],
        'std': stats['std'],
    }
    err_df = pd.DataFrame(data=table)
    err_df.set_index('index', inplace=True)
    return err_df
def uncertainty_rfr_cv(df, X, Y, o, d_start=5, x_start=4, true_y=False,
                       max_depth=5, num_trees=100, min_samp_leaf=2,
                       min_samples_split=2, max_feat='auto', folds=5,
                       type_col=0, ab_col=1, site_col=2, imp_col=3):
    '''
    Estimate per-point prediction uncertainty via k-fold cross validation:
    one RFR is fit per fold (on all data but that fold), every model
    predicts all unknown points, and the mean/std of the k predictions per
    point quantify its uncertainty.  The unknown points must share
    descriptors with the training data.

    Inputs
        - df: pd df. Training dataframe with descriptors and targets.
        - X: pd df. Points to predict uncertainties for.
        - Y: pd df. True values for X; ignored when true_y is False.
        - o: int. Index of the output column to train on.
        - d_start: int. Column index where df's descriptor columns begin.
        - x_start: int. Column index where X's descriptor columns begin.
        - true_y: bool. True when Y holds real values for X.
        - max_depth / num_trees / min_samp_leaf / min_samples_split /
          max_feat: RFR hyperparameters.
        - folds: int. Number of CV folds.
        - type_col / ab_col / site_col / imp_col: int. Column indices in X
          of the 'Type', 'AB', 'Site' and 'Impurity/M' columns.
    Outputs
        - pred_df: pd df. All predictions (folds rows x points columns).
        - err_df: pd df. Labels plus mean/std across folds per point.
    '''
    descriptors, output = descriptors_outputs(df, d_start, o)
    kf = KFold(n_splits=folds, shuffle=True, random_state=130)
    clf = RandomForestRegressor(n_estimators=num_trees,
                                max_features=max_feat, max_depth=max_depth,
                                min_samples_leaf=min_samp_leaf,
                                min_samples_split=min_samples_split,
                                n_jobs=-1, random_state=130)
    N = X[X.columns[x_start:]]
    # Hoisted out of the fold loop: N never changes between folds
    # (the original rebuilt this array once per fold).
    N_arr = np.array(N)
    # shape is folds rows x (num of data points predicting) columns
    preds_all = np.zeros((folds, N.shape[0]))
    for count, (train_idx, test_idx) in enumerate(kf.split(descriptors,
                                                           output)):
        X_train, y_train = traintest(descriptors, output, train_idx,
                                     test_idx)
        # Train on this fold's training split, then score every unknown
        # point with the freshly fitted model.
        clf.fit(X_train, y_train)
        preds = []
        for n in tqdm(np.arange(len(N_arr))):
            preds = predict_append(clf, N_arr, n, preds)
        preds_all[count] = preds
    Y_arr = dft_points(true_y, Y, N_arr)
    pred_df = pd.DataFrame(data=preds_all)
    pred_df_desc = pred_df.describe()
    err_df = uncert_table(N, X, type_col, ab_col, site_col, imp_col, Y_arr,
                          pred_df_desc)
    return pred_df, err_df
def largest_uncertainty(df, num_vals, column):
    '''
    Return the num_vals rows of df with the largest values in *column*
    (e.g. 'std_dev' or 'err_interval'), plus their index labels.

    Inputs
        - df: pd df. Dataframe from the uncertainty calculation.
        - num_vals: int. Number of largest values to return.
        - column: str. Column name to rank by.
    Outputs
        - df_largest: pd df. The n largest rows, sorted by column.
        - idx: list. Index labels of those rows.
    '''
    top = df.nlargest(num_vals, column)
    return top, list(top.index)
|
from scipy.spatial import cKDTree
from collections import defaultdict
import numpy as np
import argparse
from pickle import load, dump
import pandas as pd
def sumCells(vor, heatmap):
    """Integrate heatmap scores over the cells of a Voronoi diagram.

    A regular grid spanning the diagram's bounding box (one grid point per
    heatmap pixel) is assigned to the nearest Voronoi seed; each pixel's
    score is accumulated into that cell.

    Returns (cells, normalizer): dicts keyed by cell id, mapping to the
    summed heatmap score and the number of pixels in the cell.
    """
    # Generate all grid points, row-major over (x, y).
    x = np.linspace(vor.min_bound[0], vor.max_bound[0], heatmap.shape[0])
    y = np.linspace(vor.min_bound[1], vor.max_bound[1], heatmap.shape[1])
    points = np.array([(xi, yi) for xi in x for yi in y])
    # k-d tree over the cell seeds for nearest-cell lookup.
    voronoi_kdtree = cKDTree(vor.points)
    dist, point_regions = voronoi_kdtree.query(points, k=1)
    # Sum scores in heatmap for each voronoi cell.
    cells = defaultdict(float)
    # Pixel count per cell, used by callers to normalize the sums.
    normalizer = cells.copy()
    point_region = list(vor.point_region)  # hoisted: invariant in the loop
    for pointi, reg in enumerate(point_regions):
        # The flat grid index advances through heatmap.shape[1] columns per
        # row, so decode with divmod by the column count.  (The original
        # divided by shape[0], which mis-addressed -- and could overrun --
        # non-square heatmaps.)
        i, j = divmod(pointi, heatmap.shape[1])
        key = point_region.index(point_region[reg])
        cells[key] += heatmap[i][j]
        normalizer[key] += 1
    return cells, normalizer
def merge(cellsum, normalizer, atomsdf):
    """Attach per-cell heatmap sums and pixel areas to the atoms table.

    cellsum and normalizer are dicts keyed by cell id; the resulting rows
    are joined to atomsdf on the index.
    """
    per_cell = pd.DataFrame({"Cell Sum": pd.Series(cellsum)})
    per_cell["Cell Area"] = list(normalizer.values())
    return atomsdf.merge(per_cell, left_index=True, right_index=True)
def unpack_dict(data_dict):
    """Unpack one Bionoi projection dict into its five components:
    (atoms, vor, saliencyMap/heatmap, trueClass, predClass)."""
    keys = ('atoms', 'vor', 'saliencyMap', 'trueClass', 'predClass')
    return tuple(data_dict[k] for k in keys)
def unpack_pkl(pklFile):
    """Load a Bionoi pickle and unpack all six projection planes.

    Returns the unpacked info tuples in the order
    xoy+, xoy-, yoz+, yoz-, zox+, zox-.
    """
    with open(pklFile, 'rb') as f:
        data_dict = load(f)
    planes = ('xoy_+', 'xoy_-', 'yoz_+', 'yoz_-', 'zox_+', 'zox_-')
    return tuple(unpack_dict(data_dict[p]) for p in planes)
if __name__ == "__main__":
    # CLI entry point: aggregate saliency per atom across all six Bionoi
    # projections and pickle the ranked result.
    parser = argparse.ArgumentParser('python')
    parser.add_argument('-pickle',
                        required = True,
                        help='pickle object from Bionoi')
    parser.add_argument('-out',
                        required = True,
                        help='path to output pickle (ends in \'.pkl\')')
    args = parser.parse_args()
    with open(args.pickle, 'rb') as f:
        data_dict = load(f)
    # Loop through atoms dataframes, sum their cells, then add Cell Sum column to merged_df
    merged_df = data_dict["xoy_+"]["atoms"].drop(labels=["P(x)", "P(y)", "polygons", "color"], axis=1)
    for key, val in data_dict.items():
        # Get variables
        atoms = val['atoms']
        vor = val['vor']
        heatmap = val['saliencyMap']
        # Dropping prevents build up of column space
        atoms = atoms.drop(labels=["P(x)", "P(y)", "polygons", "color"], axis=1)
        # Sum and store
        sums, areas = sumCells(vor, heatmap)
        tmp_df = merge(sums, areas, atoms)
        # Cell Score = average saliency per pixel within the cell.
        tmp_df["Cell Score"] = tmp_df["Cell Sum"] / tmp_df["Cell Area"]
        # Warning : this merging assumes that the different DataFrames are ordered exactly the same
        # TODO : Merge by column instead of adding new column
        merged_df["Cell Score %s" % key] = tmp_df["Cell Score"]
    # Sum the cells by atom
    final = merged_df.copy()
    # Total saliency per atom = sum of its per-projection cell scores.
    final["total"] = merged_df[[col for col in merged_df.columns if col.startswith("Cell Score")]].sum(axis=1)
    final = final.drop([col for col in final.columns if col.startswith("Cell Score")], axis=1)
    final = final.sort_values("total", ascending=False)
    with open(args.out, "wb") as pkl:
        dump(final, pkl)
|
from django.urls import path,include,re_path
from . import views
from django.conf import settings
from django.conf.urls.static import static
# NOTE: Django's path() treats its pattern literally -- regex syntax such
# as r'^...$' or (\d+) never matches.  Regex routes below now use
# re_path() (already imported above) so they actually resolve.
urlpatterns = [
    path('', views.index, name='index'),
    # path('signup/', views.signup, name='signup'),
    re_path(r'^create/profile/$', views.create_profile, name='create-profile'),
    path('profile/<str:username>/', views.profile, name='profile'),
    re_path(r'^api/profiles/$', views.ProfileList.as_view()),
    path('category/', views.category, name='category'),
    re_path(r'^hoods/new/post/(\d+)$', views.post_new, name='new-post'),
    re_path(r'^map$', views.maps, name='maps'),
    re_path(r'^hoods/new/business/(\d+)$', views.post_business, name='new-business'),
    re_path(r'^hoods/(\d+)', views.hoods, name='hoods'),
    # FIXME: identical pattern to 'hoods' above, so this route is shadowed
    # and can never be reached; it needs its own distinct URL.
    re_path(r'^hoods/(\d+)', views.new_hood, name='new-hood'),
    # path('search/', views.search_results, name='search_results'),
]
if settings.DEBUG:
    # Serve user-uploaded media from MEDIA_ROOT during development only.
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
#!/usr/bin/python -u
import time
import os
import glob
import board
import busio
from adafruit_ht16k33 import segments
# Load the 1-Wire bus and thermal sensor kernel modules.
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
# Grabs the first probe out of the directory
# (1-Wire temperature sensor device IDs start with the family code '28').
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
# Set up the I2C bus and the 14-segment alphanumeric display.
i2c = busio.I2C(board.SCL, board.SDA)
display = segments.Seg14x4(i2c)
def c_to_f(c):
    """Convert a temperature from degrees Celsius to Fahrenheit."""
    scaled = c * 9.0
    return scaled / 5.0 + 32.0
def read_temp_raw():
    """Return all lines of the 1-Wire slave file as a list of strings."""
    # Context manager ensures the file handle is closed even if the read
    # raises (the original leaked the handle on error).
    with open(device_file, 'r') as f:
        return f.readlines()
def read_temp():
    """Poll the sensor until a valid (CRC 'YES') reading appears and
    return the temperature in degrees Celsius.

    Returns None if the data line carries no 't=' field.
    """
    lines = read_temp_raw()
    # The first line ends in 'YES' once the CRC check passes; retry
    # briefly until the sensor reports a clean read.
    while not lines[0].strip().endswith('YES'):
        time.sleep(0.2)
        lines = read_temp_raw()
    marker = lines[1].find('t=')
    if marker == -1:
        return None
    # Raw value is millidegrees Celsius, e.g. 't=23500' -> 23.5.
    return float(lines[1][marker + 2:]) / 1000.0
def clear():
    """Blank all segments of the display."""
    display.fill(0)
def write_display(text):
    """Clear the display, then show *text* at full brightness."""
    clear()
    # set brightness, range 0-1.0, 1.0 max brightness
    display.brightness = 1
    display.print(text)
print('Press Ctrl-C to quit.')
# Alternate between Fahrenheit and Celsius readouts every 5 seconds.
while True:
    temp = read_temp()
    temp_in_f = int(c_to_f(temp))
    write_display("{} F".format(temp_in_f))
    time.sleep(5)
    write_display("{} C".format(int(temp)))
    time.sleep(5)
|
# Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
import csv
import logging
import os
import sys
import boto3
import botocore.session
import click
from botocore.exceptions import ClientError
from audit import ProjectAudit
@click.command()
@click.option("--aws-profile", is_flag=False, default="default", help="The Named AWS profile")
@click.option(
    "--output-file", is_flag=False, help="The output file showing audit report for invalid records",
)
def main(aws_profile, output_file):
    """
    This script audits invalid records in the projects table - specifically projects template pdfs
    """
    try:
        # Fixed: the original read STAGE twice; the second lookup's "dev"
        # default was unreachable because a missing STAGE already returned.
        stage = os.environ.get("STAGE")
        if stage is None:
            logging.warning("Please set the 'STAGE' environment variable - typically one of: {dev, staging, prod}")
            return
        # Guard against a missing --output-file (open(None) would raise
        # TypeError deep inside the try block).
        if output_file is None:
            logging.warning("Please provide --output-file for the audit report")
            return
        projects_table_name = "cla-{}-projects".format(stage)
        session = boto3.Session(profile_name=aws_profile)
        dynamodb = session.resource("dynamodb")
        projects_table = dynamodb.Table(projects_table_name)
        projects = projects_table.scan()["Items"]
        # set the projects table used in the audit process
        audit_project = ProjectAudit(dynamodb, batch=projects)
        invalid_fields = audit_project.process_batch()
        # Write one (space-delimited) row per invalid field found.
        columns = ["project_id", "error_type", "column", "data"]
        with open(output_file, "w", newline="") as csv_file:
            writer = csv.DictWriter(csv_file, fieldnames=columns, delimiter=" ")
            writer.writeheader()
            writer.writerows(
                {
                    "project_id": audit["project_id"],
                    "error_type": audit["error_type"],
                    "column": audit["column"],
                    "data": audit["data"],
                }
                for audit in invalid_fields
            )
    except (Exception, ClientError) as err:
        logging.error(err, exc_info=True)
if __name__ == "__main__":
    # Propagate the command's return value as the process exit code.
    sys.exit(main())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
target_host = '127.0.0.1'
target_port = 8787
# SOCK_DGRAM = UDP (connectionless).
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Sockets transmit bytes: the payload must be a bytes literal.
# (The original passed a str, which raises TypeError on Python 3.)
client.sendto(b"message here", (target_host, target_port))
# Block until the server replies with a datagram.
data, addr = client.recvfrom(4096)
print(data)
|
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    # Polling-station importer for the London Borough of Newham
    # (GSS code E09000025), 2018 local and mayoral elections.
    council_id = "E09000025"
    addresses_name = "local.2018-05-03/Version 2/LBNewham Democracy_Club__03May2018.TSV"
    stations_name = "local.2018-05-03/Version 2/LBNewham Democracy_Club__03May2018.TSV"
    elections = ["local.2018-05-03", "mayor.newham.2018-05-03"]
    csv_delimiter = "\t"
    def address_record_to_dict(self, record):
        """Apply Newham-specific overrides before the default conversion.

        Records are either dropped entirely (return None) or given a
        corrected postcode.  The checks are order-sensitive, so they are
        kept sequential; everything else falls through to the base
        importer's conversion.
        """
        # Drop records with this known-bad postcode.
        if record.addressline6 == "E16 1EF":
            return None
        # Drop this specific property (by UPRN).
        if record.property_urn == "10090852604":
            return None
        # Postcode correction for this UPRN.
        if record.property_urn == "10034510101":
            rec = super().address_record_to_dict(record)
            rec["postcode"] = "E13 8NA"
            return rec
        # Drop records with this known-bad postcode.
        if record.addressline6 == "E16 1XF":
            return None
        # Postcode corrections for these UPRNs.
        if record.property_urn == "10090756946":
            rec = super().address_record_to_dict(record)
            rec["postcode"] = "E7 9AW"
            return rec
        if record.property_urn == "10023994990":
            rec = super().address_record_to_dict(record)
            rec["postcode"] = "E7 9AW"
            return rec
        return super().address_record_to_dict(record)
|
import requests
from config import GRAPHQL_API, HEADERS
def run_query(query):
    """POST *query* to the GraphQL endpoint and return the decoded JSON.

    Raises Exception when the endpoint answers with a non-200 status.
    """
    response = requests.post(GRAPHQL_API, json={'query': query}, headers=HEADERS)
    if response.status_code != 200:
        raise Exception(
            "Query failed to run by returning code of {}. {}".format(
                response.status_code, query))
    return response.json()
def get_data(address) -> list:
    """Fetch the gift records for *address* from the cyber_gift table.

    Returns a list of dicts with keys address/audience/gift/grade/segment.
    """
    import json  # local: only needed here to quote the address safely
    # json.dumps yields a quoted, escaped string literal (valid GraphQL
    # string syntax), so a crafted address cannot break out of the query.
    # (The original spliced the raw value between bare double quotes.)
    query = '''{
    cyber_gift(where: {address: {_eq: %s}}) {
        address
        audience
        gift
        grade
        segment
    }
    }''' % json.dumps(str(address))
    result = run_query(query)
    return result['data']['cyber_gift']
def format_for_aggregate(address: str) -> tuple:
    """Aggregate all gift records for one address.

    Returns (total_gift, audiences, grades, segments): the float sum of
    all gifts plus three parallel lists, one entry per record.

    Note: the original annotated the return as ``()`` -- the empty-tuple
    *instance*, not a type -- fixed to ``tuple``.
    """
    data = get_data(address)
    gift = 0.0
    audience = []
    grade = []
    segment = []
    for record in data:
        gift += float(record['gift'])
        audience.append(record['audience'])
        grade.append(record['grade'])
        segment.append(record['segment'])
    return gift, audience, grade, segment
def format_for_full_data(address: str) -> list:
    """Expand each gift record for *address* into the full claim-tracking
    schema (nothing claimed yet: claimed=False, claimed_amount='0')."""
    return [
        {
            "denom": "Mboot",
            "address": record['address'],
            "gift": record['gift'],
            "claimed": False,
            "claimed_amount": "0",
            "unclaimed_amount": record['gift'],
            "audience": record['audience'],
            "grade": record['grade'],
            "segment": record['segment'],
        }
        for record in get_data(address)
    ]
from georef_ar_etl.models import Province
from georef_ar_etl.exceptions import ProcessException
from georef_ar_etl.provinces import ProvincesExtractionStep
from . import ETLTestCase
class TestEntitiesExtractionStep(ETLTestCase):
    """Tests for the generic entities extraction step, using provinces."""
    def setUp(self):
        super().setUp()
        # Fresh temporary provinces table for each test.
        self._tmp_provinces = self.create_test_provinces()
    def tearDown(self):
        # Remove everything the test inserted before the base teardown.
        self._ctx.session.commit()
        self._ctx.session.query(Province).delete()
        self._ctx.session.query(self._tmp_provinces).delete()
        super().tearDown()
    def test_repeated_tmp_entity(self):
        """If a temporary (tmp_) table contains two entities with the same
        code, an exception should be raised during the extraction process
        for any class inheriting from EntitiesExtractionStep."""
        step = ProvincesExtractionStep()
        # Duplicate the first province's code ('in1') to force a primary
        # key collision during extraction.
        prov = self._ctx.session.query(self._tmp_provinces).first()
        self._ctx.session.add(self._tmp_provinces(in1=prov.in1))
        self._ctx.session.commit()
        # 'Clave primaria' is Spanish for 'primary key' in the error text.
        with self.assertRaisesRegex(ProcessException, 'Clave primaria'):
            step.run(self._tmp_provinces, self._ctx)
|
from django.apps import AppConfig
class ExpeConfig(AppConfig):
    """Django app configuration for the 'experiments' application."""
    name = 'experiments'
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright 2017 Timothy Dozat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from .base_bucket import BaseBucket
import pdb
#***************************************************************
class DictBucket(BaseBucket):
  """Bucket of index sequences deduplicated by their joined token string.

  ``depth`` controls the trailing dimension of the dense matrix built by
  close(): depth > 0 means fixed-width rows, depth == 0 scalar ids per
  token, and depth == -1 graph mode where entries are (head, value) pairs.
  """
  #=============================================================
  def __init__(self, idx, depth, config=None):
    """Create an empty bucket with bucket index *idx* and *depth*."""
    super(DictBucket, self).__init__(idx, config=config)
    self._depth = depth
    self._indices = []
    self._tokens = []
    self._str2idx = {}
    return
  #=============================================================
  def reset(self):
    """Drop all stored sequences, tokens and the dedup lookup table."""
    self._indices = []
    self._tokens = []
    self._str2idx = {}
    return
  #=============================================================
  def add(self, indices, tokens):
    """Store one sequence and return its (possibly shared) index.

    Sequences whose space-joined token string was seen before reuse the
    previously assigned index; only unseen sequences are appended.
    """
    assert self._is_open, 'DictBucket is not open for adding entries'
    string = ' '.join(tokens)
    if string in self._str2idx:
      sequence_index = self._str2idx[string]
    else:
      sequence_index = len(self._indices)
      self._str2idx[string] = sequence_index
      self._tokens.append(tokens)
      super(DictBucket, self).add(indices)
    return sequence_index
  #=============================================================
  def close(self):
    """Pack the stored sequences into a zero-padded int32 matrix and pass
    it to the base class close()."""
    # Initialize the index matrix
    first_dim = len(self._indices)
    second_dim = max(len(indices) for indices in self._indices) if self._indices else 0
    shape = [first_dim, second_dim]
    if self.depth > 0:
      shape.append(self.depth)
    elif self.depth == -1:
      # Graph mode: one slot per possible head, i.e. a square trailing dim.
      shape.append(shape[-1])
    attr_mode=False
    if self._indices:
      # if type(self._indices[0][0])==type((0,0)):
      #   pdb.set_trace()
      # Entries that are numpy vectors (per-token attribute features) add
      # their width as an extra trailing dimension.
      if type(self._indices[0][0])==type(np.array(0)):
        shape.append(self._indices[0][0].shape[0])
        attr_mode=True
    data = np.zeros(shape, dtype=np.int32)
    # Add data to the index matrix
    if self.depth >= 0:
      try:
        for i, sequence in enumerate(self._indices):
          if sequence:
            if attr_mode:
              sequence=np.array(sequence)
            data[i, 0:len(sequence)] = sequence
      except ValueError:
        # A sequence didn't match the expected width; print context
        # before re-raising so the offending sentence can be located.
        print('Expected shape: {}\nsequence: {}'.format([len(sequence), self.depth], sequence))
        print('\ntokens: {}'.format(self._tokens[i]))
        raise
    elif self.depth == -1:
      # for graphs, sequence should be list of (idx, val) pairs
      for i, sequence in enumerate(self._indices):
        for j, node in enumerate(sequence):
          for edge in node:
            if isinstance(edge, (tuple, list)):
              edge, v = edge
              data[i, j, edge] = v
            else:
              # Bare head index: mark edge presence with weight 1.
              data[i, j, edge] = 1
    super(DictBucket, self).close(data)
    return
  #=============================================================
  def bert_close(self,sep_token=None,is_pretrained=False,get_dephead=False):
    """Build BERT-style inputs from the stored sequences and close.

    With is_pretrained=True the stored entries are precomputed feature
    matrices, zero-padded into [n, max_len, feat]; get_dephead
    additionally reindexes each feature matrix by the head indices stored
    in _tokens.  Otherwise entries are per-token wordpiece id lists,
    joined with the (start, end) sep_token pair into input_ids,
    input_mask and segment_ids plus a wordpiece-to-token 'mapping'.
    """
    # Data Preprocess specially for bert
    if is_pretrained:
      first_dim = len(self._indices)
      second_dim = max(indices.shape[0] for indices in self._indices) if self._indices else 0
      third_dim = self._indices[0].shape[-1] if self._indices else 0
      shape = [first_dim, second_dim, third_dim]
      data = np.zeros(shape, dtype=np.float32)
      if get_dephead:
        assert len(self._tokens)==len(self._indices), "inconsistant of tokens and features!"
      for i,indices in enumerate(self._indices):
        if get_dephead:
          tokens=self._tokens[i]
          assert len(tokens)==indices.shape[0], "inconsistant of tokens and features!"
          # Gather each token's feature row by the stored head index
          # (tokens hold stringified indices here).
          data[i,:indices.shape[0]]=indices[[int(x) for x in tokens]]
        else:
          data[i,:indices.shape[0]]=indices
      super(DictBucket, self).close(data)
      return
    first_dim = len(self._indices)
    # if first_dim>0:
    #   pdb.set_trace()
    bertlist=[]
    bertmask=[]
    token_mapping=[]
    for orig_tokens in self._indices:
      bert_tokens=[]
      orig_to_tok_map=[]
      # Leading separator token; record where each original token's first
      # wordpiece lands, then append the trailing separator.
      bert_tokens.append(sep_token[0])
      for orig_token in orig_tokens:
        orig_to_tok_map.append(len(bert_tokens))
        bert_tokens.extend(orig_token)
      bert_tokens.append(sep_token[1])
      bertlist.append(bert_tokens)
      bertmask.append([1]*len(bert_tokens))
      token_mapping.append(orig_to_tok_map)
    # Initialize the index matrix
    bert_tokens_dim = max(len(indices) for indices in bertlist) if bertlist else 0
    bertmask_dim = max(len(indices) for indices in bertmask) if bertmask else 0
    mapping_dim = max(len(indices) for indices in token_mapping) if token_mapping else 0
    bert_tokens_shape = [first_dim, bert_tokens_dim]
    bertmask_shape = [first_dim, bertmask_dim]
    mapping_shape = [first_dim, mapping_dim]
    bert_tokens_data = np.zeros(bert_tokens_shape, dtype=np.int32)
    segment_data = np.zeros(bert_tokens_shape, dtype=np.int32)
    bertmask_data = np.zeros(bertmask_shape, dtype=np.int32)
    mapping_data = np.zeros(mapping_shape, dtype=np.int32)
    for i in range(len(bertlist)):
      if bertlist[i]:
        #bert token should have the same shape as bert mask
        bert_tokens_data[i, 0:len(bertlist[i])] = bertlist[i]
        bertmask_data[i, 0:len(bertmask[i])] = bertmask[i]
      if token_mapping[i]:
        mapping_data[i, 0:len(token_mapping[i])] = token_mapping[i]
    #set the bert dictionary
    data={}
    data['input_ids']=bert_tokens_data
    data['input_mask']=bertmask_data
    data['segment_ids']=segment_data  # stays all zeros: single-segment inputs
    data['mapping']=mapping_data
    super(DictBucket, self).close(data)
    return
  #=============================================================
  def elmo_close(self):
    """Zero-pad the stored precomputed feature arrays into a dense
    [n, max_len, 3, feat] float32 tensor and close."""
    # Data Preprocess specially for bert or elmo
    first_dim = len(self._indices)
    second_dim = max(indices.shape[0] for indices in self._indices) if self._indices else 0
    third_dim = self._indices[0].shape[-1] if self._indices else 0
    shape = [first_dim, second_dim, 3, third_dim]
    data = np.zeros(shape, dtype=np.float32)
    for i,indices in enumerate(self._indices):
      data[i,:indices.shape[0]]=indices
    super(DictBucket, self).close(data)
    return
  #=============================================================
  @property
  def depth(self):
    # Trailing-dimension mode of this bucket (see class docstring).
    return self._depth
  @property
  def data_indices(self):
    # Dense matrix built by close(); stored on the base class.
    return self._data
|
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision # this is the database of torch
from torchvision import datasets, transforms
class Discriminator(torch.nn.Module):
    """LeNet-style CNN discriminator for 28x28 single-channel inputs.

    forward() returns (score, feature): one scalar logit per sample plus
    the 86-dimensional penultimate feature vector.
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        # Two conv / LeakyReLU / max-pool stages: 28x28 -> 12x12 -> 4x4.
        feature_layers = [
            nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=1),
            nn.LeakyReLU(inplace=True, negative_slope=0.01),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1),
            nn.LeakyReLU(inplace=True, negative_slope=0.01),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        self.feature = nn.Sequential(*feature_layers)
        # Fully connected head; dropout guards against overfitting.
        classifier_layers = [
            nn.Linear(4 * 4 * 64, 4 * 4 * 64),
            nn.LeakyReLU(inplace=True, negative_slope=0.01),
            nn.Dropout(p=0.5),
            nn.Linear(4 * 4 * 64, 86),
            nn.LeakyReLU(inplace=True, negative_slope=0.01),
        ]
        self.classifier = nn.Sequential(*classifier_layers)
        self.last = nn.Linear(86, 1)

    def forward(self, x):
        conv_out = self.feature(x)
        flat = conv_out.view(conv_out.size(0), -1)
        return_feature = self.classifier(flat)
        score = self.last(return_feature)
        return score, return_feature
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : Ampel-core/ampel/core/ContextUnit.py
# License : BSD-3-Clause
# Author : vb <vbrinnel@physik.hu-berlin.de>
# Date : 07.10.2019
# Last Modified Date: 19.09.2021
# Last Modified By : vb <vbrinnel@physik.hu-berlin.de>
from typing import Optional
from ampel.base.AmpelBaseModel import AmpelBaseModel
from ampel.core.AmpelContext import AmpelContext
from ampel.log.AmpelLogger import AmpelLogger
from ampel.secret.Secret import Secret
class ContextUnit(AmpelBaseModel):
    """
    Base class for units requiring a reference to an AmpelContext instance
    """
    #: Private variable potentially set by UnitLoader for provenance purposes. Either:
    #: * None if provanance flag is False
    #: * 0 in case model content is not serializable
    #: * any other signed int value
    _trace_id: Optional[int] = None
    def __init__(self, context: AmpelContext, **kwargs) -> None:
        """Store *context* and snapshot the model fields used for tracing.

        :param context: the active AmpelContext; must not be None.
        :raises ValueError: if context is None.
        """
        if context is None:
            raise ValueError("Parameter context cannot be None")
        super().__init__(**kwargs)
        d = self.__dict__
        # Keep a sorted snapshot of the instance fields for provenance;
        # secrets, the context itself and loggers are excluded.
        self._trace_content = {
            k: d[k]
            for k in sorted(d)
            if not isinstance(d[k], (Secret, AmpelContext, AmpelLogger))
        }
        self.context = context
|
from __future__ import absolute_import, division, print_function
import os
import sys
import argparse
from math import log
from tqdm import tqdm
from copy import deepcopy
import numpy as np
import gzip
import pickle
import random
from datetime import datetime
import matplotlib.pyplot as plt
import torch
from easydict import EasyDict as edict
from utils import *
class KnowledgeGraph(object):
    def __init__(self, dataset):
        """Build the knowledge-graph adjacency structure from *dataset*.

        Amazon datasets carry review text, so their reviews are loaded
        with word nodes; other datasets load plain user-item interactions.
        """
        self.G = dict()
        self._load_entities(dataset)
        self.dataset_name = dataset.dataset_name
        if self.dataset_name in AMAZON_DATASETS:
            self._load_reviews_with_text(dataset)
            #self._load_reviews(dataset)
        else:
            self._load_reviews(dataset)
        self._load_knowledge(dataset)
        self._clean()
        # Lazily filled by callers; None until top matches are computed.
        self.top_matches = None
def _load_entities(self, dataset):
print('Load entities...')
num_nodes = 0
for entity in get_entities(dataset.dataset_name):
self.G[entity] = {}
vocab_size = getattr(dataset, entity).vocab_size
for eid in range(vocab_size):
relations = get_dataset_relations(dataset.dataset_name, entity)
self.G[entity][eid] = {r: [] for r in relations}
num_nodes += vocab_size
print('Total {:d} nodes.'.format(num_nodes))
def _load_reviews(self, dataset):
print('Load reviews...')
num_edges = 0
for rid, data in enumerate(dataset.review.data):
uid, pid, _, _ = data
# (2) Add edges.
main_product, main_interaction = MAIN_PRODUCT_INTERACTION[dataset.dataset_name]
self._add_edge(USER, uid, main_interaction, main_product, pid)
num_edges += 2
print('Total {:d} review edges.'.format(num_edges))
# with open('./tmp/review_removed_words.txt', 'w') as f:
# f.writelines([' '.join(words) + '\n' for words in all_removed_words])
def _load_reviews_with_text(self, dataset, word_tfidf_threshold=0.1, word_freq_threshold=5000):
print('Load reviews...')
# (1) Filter words by both tfidf and frequency.
vocab = dataset.word.vocab
text_reviews = [d[4] for d in dataset.review.data]
review_tfidf = compute_tfidf_fast(vocab, text_reviews)
distrib = dataset.review.word_distrib
num_edges = 0
all_removed_words = []
for rid, data in enumerate(dataset.review.data):
uid, pid, _, _, review = data #uid, pid, rating, timestamp, review (words)
doc_tfidf = review_tfidf[rid].toarray()[0]
remained_words = [wid for wid in set(review) if doc_tfidf[wid] >= word_tfidf_threshold and distrib[wid] <= word_freq_threshold]
removed_words = set(review).difference(remained_words) # only for visualize
removed_words = [vocab[wid] for wid in removed_words]
all_removed_words.append(removed_words)
if len(remained_words) <= 0:
continue
# (2) Add edges.
main_product, main_interaction = MAIN_PRODUCT_INTERACTION[dataset.dataset_name]
self._add_edge(USER, uid, main_interaction, main_product, pid)
num_edges += 2
# I shall exploit the fact that a user has positive interacted with the movie
for wid in remained_words:
self._add_edge(USER, uid, MENTION, WORD, wid)
self._add_edge(main_product, pid, DESCRIBED_AS, WORD, wid)
num_edges += 4
print('Total {:d} review edges.'.format(num_edges))
# with open('./tmp/review_removed_words.txt', 'w') as f:
# f.writelines([' '.join(words) + '\n' for words in all_removed_words])
def _load_knowledge(self, dataset):
relations = get_knowledge_derived_relations(dataset.dataset_name)
main_entity, _ = MAIN_PRODUCT_INTERACTION[dataset.dataset_name]
for relation in relations:
print('Load knowledge {}...'.format(relation))
data = getattr(dataset, relation).data
num_edges = 0
for pid, eids in enumerate(data):
if len(eids) <= 0:
continue
for eid in set(eids):
et_type = get_entity_tail(dataset.dataset_name, relation)
self._add_edge(main_entity, pid, relation, et_type, eid)
num_edges += 2
print('Total {:d} {:s} edges.'.format(num_edges, relation))
def _add_edge(self, etype1, eid1, relation, etype2, eid2):
self.G[etype1][eid1][relation].append(eid2)
self.G[etype2][eid2][relation].append(eid1)
def _clean(self):
print('Remove duplicates...')
for etype in self.G:
for eid in self.G[etype]:
for r in self.G[etype][eid]:
data = self.G[etype][eid][r]
data = tuple(sorted(set(data)))
self.G[etype][eid][r] = data
def compute_degrees(self):
print('Compute node degrees...')
self.degrees = {}
self.max_degree = {}
for etype in self.G:
self.degrees[etype] = {}
for eid in self.G[etype]:
count = 0
for r in self.G[etype][eid]:
count += len(self.G[etype][eid][r])
self.degrees[etype][eid] = count
def get(self, eh_type, eh_id=None, relation=None):
data = self.G
if eh_type is not None:
data = data[eh_type]
if eh_id is not None:
data = data[eh_id]
if relation is not None:
data = data[relation]
return data
def __call__(self, eh_type, eh_id=None, relation=None):
return self.get(eh_type, eh_id, relation)
def get_tails(self, entity_type, entity_id, relation):
return self.G[entity_type][entity_id][relation]
'''
def get_tails_given_user(self, entity_type, entity_id, relation, user_id):
""" Very important!
:param entity_type:
:param entity_id:
:param relation:
:param user_id:
:return:
"""
tail_type = KG_RELATION[entity_type][relation]
tail_ids = self.G[entity_type][entity_id][relation]
if tail_type not in self.top_matches:
return tail_ids
top_match_set = set(self.top_matches[tail_type][user_id])
top_k = len(top_match_set)
if len(tail_ids) > top_k:
tail_ids = top_match_set.intersection(tail_ids)
return list(tail_ids)
def trim_edges(self):
degrees = {}
for entity in self.G:
degrees[entity] = {}
for eid in self.G[entity]:
for r in self.G[entity][eid]:
if r not in degrees[entity]:
degrees[entity][r] = []
degrees[entity][r].append(len(self.G[entity][eid][r]))
for entity in degrees:
for r in degrees[entity]:
tmp = sorted(degrees[entity][r], reverse=True)
print(entity, r, tmp[:10])
def get_user_item_path_distribution(self, path_patter_name):
path_pattern_degree = 0
for (k, v) in self.degrees[path_patter_name]:
path_pattern_degree += v
return path_pattern_degree
def get_total_path_pattern_number(self):
path_pattern_degree = 0
for (path_pattern, ) in self.degrees:
for (k, v) in self.degrees[k]:
path_pattern_degree += v
return path_pattern_degree
def set_top_matches(self, u_u_match, u_p_match, u_w_match):
self.top_matches = {
USER: u_u_match,
MOVIE: u_p_match,
#WORD: u_w_match,
}
def heuristic_search(self, uid, pid, pattern_id, trim_edges=False):
if trim_edges and self.top_matches is None:
raise Exception('To enable edge-trimming, must set top_matches of users first!')
if trim_edges:
_get = lambda e, i, r: self.get_tails_given_user(e, i, r, uid)
else:
_get = lambda e, i, r: self.get_tails(e, i, r)
pattern = PATH_PATTERN[pattern_id]
paths = []
if pattern_id == 1: # OK
wids_u = set(_get(USER, uid, MENTION)) # USER->MENTION->WORD
wids_p = set(_get(PRODUCT, pid, DESCRIBED_AS)) # PRODUCT->DESCRIBE->WORD
intersect_nodes = wids_u.intersection(wids_p)
paths = [(uid, x, pid) for x in intersect_nodes]
elif pattern_id in [11, 12, 13, 14, 15, 16, 17]:
pids_u = set(_get(USER, uid, PURCHASE)) # USER->PURCHASE->PRODUCT
pids_u = pids_u.difference([pid]) # exclude target product
nodes_p = set(_get(PRODUCT, pid, pattern[3][0])) # PRODUCT->relation->node2
if pattern[2][1] == USER:
nodes_p.difference([uid])
for pid_u in pids_u:
relation, entity_tail = pattern[2][0], pattern[2][1]
et_ids = set(_get(PRODUCT, pid_u, relation)) # USER->PURCHASE->PRODUCT->relation->node2
intersect_nodes = et_ids.intersection(nodes_p)
tmp_paths = [(uid, pid_u, x, pid) for x in intersect_nodes]
paths.extend(tmp_paths)
elif pattern_id == 18:
wids_u = set(_get(USER, uid, MENTION)) # USER->MENTION->WORD
uids_p = set(_get(PRODUCT, pid, PURCHASE)) # PRODUCT->PURCHASE->USER
uids_p = uids_p.difference([uid]) # exclude source user
for uid_p in uids_p:
wids_u_p = set(_get(USER, uid_p, MENTION)) # PRODUCT->PURCHASE->USER->MENTION->WORD
intersect_nodes = wids_u.intersection(wids_u_p)
tmp_paths = [(uid, x, uid_p, pid) for x in intersect_nodes]
paths.extend(tmp_paths)
return paths
'''
def check_test_path(dataset_str, kg):
    """Print every (uid, pid) pair from the test split that is unreachable
    via all heuristic path patterns of the knowledge graph."""
    # Check if there exists at least one path for any user-product in test set.
    test_user_products = load_labels(dataset_str, 'test')
    pattern_ids = [1, 11, 12, 13, 14, 15, 16, 17, 18]
    for uid in test_user_products:
        for pid in test_user_products[uid]:
            total_paths = sum(
                len(kg.heuristic_search(uid, pid, pattern_id))
                for pattern_id in pattern_ids
            )
            if total_paths == 0:
                print(uid, pid)
|
import configparser
import os
import ast
from zaailabcorelib.zconfig.constant import *
class ZConfig():
    """Environment-aware .ini configuration loader (singleton).

    Selects conf/<dev|stag|prod>.ini based on the ``SERVICE_ENV_SETTING``
    environment variable and exposes typed getters over the parsed config.
    """
    __instance = None

    @staticmethod
    def getInstance():
        """Static access method returning the shared singleton instance."""
        # Bug fix: identity comparison with None (`is None`, not `== None`).
        if ZConfig.__instance is None:
            ZConfig.__instance = ZConfig()
        return ZConfig.__instance

    def __init__(self, config_dir='./conf', auto_load=True):
        """Load the config file matching the current environment.

        :param config_dir: directory containing the per-environment .ini files
        :param auto_load: when True, eagerly parse every value into self.ARGS
        :raises ValueError: when SERVICE_ENV_SETTING is missing or invalid
        :raises FileNotFoundError: when an expected .ini file is absent
        """
        self._config_dir = config_dir
        self._getConfigDirectory()
        # Bug fix: the bare `except:` was narrowed to the two failures this
        # validation can actually produce (missing var / invalid value).
        try:
            env = os.environ['SERVICE_ENV_SETTING']
            assert env in ["DEVELOPMENT", "PRODUCTION", "STAGING"]
        except (KeyError, AssertionError):
            raise ValueError(
                "The environment param `SERVICE_ENV_SETTING` need to be assigned as: DEVELOPMENT | PRODUCTION | STAGING")
        if env == 'DEVELOPMENT':
            self.conf = self._development()
        elif env == 'STAGING':
            self.conf = self._staging()
        elif env == 'PRODUCTION':
            self.conf = self._production()
        # Automatically load all config from <config>.ini
        if auto_load:
            self.ARGS = self._load_all_config()

    def _load_all_config(self):
        """Flatten every section into a {'section@key': parsed_value} dict."""
        conf_args = {}
        for sec_name in self.conf.keys():
            for val_name in self.conf[sec_name]:
                # Bug fix: narrowed bare `except:` to the errors literal_eval
                # raises on non-literal input (TypeError covers None values).
                try:
                    conf_args[sec_name + '@' + val_name] = ast.literal_eval(self.conf[sec_name][val_name])
                except (ValueError, SyntaxError, TypeError):
                    # Not a Python literal (e.g. a bare string/URL): keep raw text.
                    conf_args[sec_name + '@' + val_name] = str(self.conf[sec_name][val_name])
        return conf_args

    def _development(self):
        configParser = configparser.ConfigParser()
        configParser.read(self._dev_config_paths)
        return configParser

    def _staging(self):
        configParser = configparser.ConfigParser()
        configParser.read(self._stag_config_paths)
        return configParser

    def _production(self):
        configParser = configparser.ConfigParser()
        configParser.read(self._prod_config_paths)
        return configParser

    def _getConfigDirectory(self):
        """Resolve the three per-environment file paths and ensure they exist."""
        self._dev_config_paths = os.path.join(self._config_dir, DEV_FILENAME)
        self._prod_config_paths = os.path.join(self._config_dir, PROD_FILENAME)
        self._stag_config_paths = os.path.join(self._config_dir, STAG_FILENAME)
        for f in [self._dev_config_paths, self._prod_config_paths, self._stag_config_paths]:
            if not os.path.exists(f):
                raise FileNotFoundError("File not found: {}".format(f))

    def getString(self, block, key, default=None):
        # Bug fix: `default` was accepted but ignored (a None value came back
        # as the string 'None'); now consistent with the other typed getters.
        if self.conf[block][key] is None:
            return default
        return str(self.conf[block][key])

    def getInt(self, block, key, default=0):
        if self.conf[block][key] is None:
            return default
        return int(self.conf[block][key])

    def getFloat(self, block, key, default=0.0):
        if self.conf[block][key] is None:
            return default
        return float(self.conf[block][key])

    def getBool(self, block, key, default=False):
        # Bug fix: default was 0.0 (a float) for a boolean getter.
        if self.conf[block][key] is None:
            return default
        return ast.literal_eval(self.conf[block][key])

    def getList(self, block, key, default=[]):
        # NOTE: mutable default kept for interface compatibility; callers
        # should not mutate the returned default list.
        if self.conf[block][key] is None:
            return default
        return ast.literal_eval(self.conf[block][key])
|
#!/usr/bin/env python3
import csv
import os
import sys
import gpsd
import argparse
from scapy.all import *
from multiprocessing import Process, current_process, Value
### Global Declaration ###
BANNER ="""
_, _, ___, ,_ ,_ , _, ___,___,_,,_
(_,(_,' | | \, |_)| / \,' | ' | /_,|_)
_) _) _|_,_|_/ '| '|__'\_/ | |'\_'| \
' ' ' ' ' ' ' ' ' `' `
v0.1
"""
filename = "out.csv"
ssids = set() # for testing purpose to store values locally instead of csv file
channel = Value('i',0) # needed for shared state between multiprocessors
power = Value('i',0) # needed for shared variable between function (not sure how to pass argument to prn in sniff(), scapy)
SIG_THRESHOLD = -80 # signal threshold before storing value (in dB)
def parse_arguments():
    """Build the CLI parser and return the parsed options."""
    description = ('tool to generate a csv file containing '
                   'SSIDs, BSSID, signal strength & its location. The csv file can'
                   'be uploaded to google maps/earth to plot the '
                   'location of the access points - Requires the use of GPS dongle')
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-i', '--interface', metavar="", type=str, required=True,
                        help='wireless interface')
    parser.add_argument('-c', '--count', metavar="", type=int, default=10000,
                        help='packets to sniff. Default = 10000')
    parser.add_argument('-p', '--power', metavar="", type=int, default=-100,
                        help='minimum SSID signal needs to be in dB, make sure its negative value')
    parser.add_argument('-g', '--gps', metavar="", type=int, default=1,
                        help='Enable or disable GPS - [1]=enabled, [0]=disabled')
    return parser.parse_args()
def PacketHandler(pkt) :
    """sniff() callback: record any non-hidden beacon frame that passes check()."""
    if pkt.haslayer(Dot11Beacon) :
        if pkt.info: # if not hidden SSID
            # check whether pkt.info (SSID) / pkt.addr3 (BSSID) is already in the csv file
            if check(pkt.info,pkt.addr3,pkt.dBm_AntSignal) :
                write(pkt.info,pkt.addr3,pkt.dBm_AntSignal) # write new found SSID + BSSID into csv
                #print (len(ssids), pkt.addr3, pkt.info) #addr3 = BSSID
def check(ssid, bssid, signal):
    """Decide whether an observed access point should be appended to the CSV.

    :param ssid: SSID as bytes (scapy pkt.info)
    :param bssid: BSSID string (scapy pkt.addr3)
    :param signal: signal strength in dB
    :return: 1 when the entry is new enough to record, otherwise 0
    """
    # Ignore beacons weaker than the user-supplied threshold.
    if signal < power.value:
        return 0
    # Read the CSV once (the original opened and scanned it twice);
    # use the module-level `filename` instead of a second hardcoded path.
    with open(filename, "r") as csv_file:
        lines = csv_file.readlines()
    ssid_text = ssid.decode()
    if not any(ssid_text in line for line in lines):
        return 1  # brand new SSID
    if any(bssid in line for line in lines):
        return 0  # this exact AP is already recorded
    # Bug fix: the original fell through and returned None here, so a known
    # SSID broadcast by a *new* BSSID was silently never written. Record it.
    return 1
def write(ssid, bssid, signal):
    """Append one access-point row (SSID, BSSID, channel, signal, lat, lon)
    to the CSV, tagging it with the current GPS fix when available."""
    # Ask gpsd for the current position; fall back to empty coordinates
    # when there is no fix / gpsd is not connected.
    try :
        packet = gpsd.get_current()
        coordinate = packet.position()
        # Separate latitude and longtitude
        latitude = coordinate[0]
        longitude = coordinate[1]
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit while sniffing.
        latitude = ""
        longitude = ""
    print ("[+] Adding Entry:",ssid.decode(),bssid,"CH" + str(channel.value),str(signal)+"dB",latitude,longitude)
    # newline='' is the documented way to open files for the csv module
    # (prevents spurious blank rows on Windows).
    with open (filename, mode='a', newline='') as csv_file:
        csv_writer = csv.writer(csv_file, delimiter=',')
        csv_writer.writerow([ssid.decode(),bssid,channel.value,signal,latitude,longitude])
def channel_hop(interface):
    """Child-process loop: retune the interface to a random channel (1-14)
    once per second until interrupted.

    The shared `channel` Value lets the sniffer process log which channel a
    beacon was captured on. `random` and `time` are available via scapy's
    wildcard import at the top of the file.
    """
    #process_id = os.getpid()
    while True:
        try:
            channel.value = random.randrange(1,15)
            # NOTE(review): shell command assembled from the interface string —
            # fine for local CLI use, injectable if `interface` were untrusted.
            os.system("iwconfig " + interface + " channel " + str(channel.value))
            time.sleep(1)
        except KeyboardInterrupt:
            break
def main():
    #process_id1 = os.getpid()
    """Main Function"""
    print(BANNER + "\n\n")
    args = parse_arguments()
    # Start the channel hopper, creates a new process
    p = Process(target = channel_hop, args=(args.interface,))
    p.start()
    # To do: create new process for gpsd
    #open newfile or existing file
    exists = os.path.isfile(filename)
    if not exists:
        # if new file create fieldnames in csv file
        with open(filename, mode='w') as new_file:
            CSV_FIELDNAME = ['SSID','BSSID','channel','signal strength','latitude', 'longlitude']
            CSV_WRITER = csv.DictWriter(new_file, fieldnames=CSV_FIELDNAME, delimiter=",")
            CSV_WRITER.writeheader()
            new_file.close()  # redundant: the with-block already closes the file
    # connect to the local gpsd . gpsd -N -n -D2 /dev/ttyACM0
    #start_gpsd = 'gpsd -N -n /dev/ttyACM0'
    #os.system(start_gpsd)
    if (args.gps):
        gpsd.connect()
    # Threshold shared with the sniffer callback via a multiprocessing.Value.
    power.value = args.power # used in check() function
    # Blocks until `count` packets have been processed by PacketHandler.
    sniff(iface = args.interface, count = args.count, prn = PacketHandler)
|
# BSD 3-Clause License
# Copyright (c) 2017, Federico T.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division
import warnings
import numpy as np
from six.moves import range
from functools import partial
from sklearn.covariance import empirical_covariance
from sklearn.utils.validation import check_X_y
from regain.covariance.missing_graphical_lasso_ import \
_compute_empirical_covariance, _compute_cs, _compute_mean
from regain.covariance.kernel_time_graphical_lasso_ import \
kernel_time_graphical_lasso, KernelTimeGraphicalLasso
from regain.covariance.time_graphical_lasso_ import loss
from regain.covariance.missing_graphical_lasso_ import \
LatentMissingGraphicalLasso
from regain.scores import log_likelihood_t, BIC_t, EBIC_t, EBIC_m_t
from regain.validation import check_norm_prox
from regain.utils import convergence, ensure_posdef, positive_definite
from regain.norm import l1_norm
def missing_time_graphical_lasso(
        X, alpha=0.01, rho=1, kernel=None, psi='laplacian',
        over_relax=1, max_iter=100, verbose=False,
        tol=1e-4, rtol=1e-4, return_history=False, return_n_iter=True,
        update_rho_options=None, compute_objective=True):
    r"""Missing Graphical lasso solver via EM algorithm.

    Solves the following problem:
        minimize trace(S*K) - log det K + alpha ||K||_{od,1}
    where S = (1/n) X^T \times X is the empirical covariance of the data
    matrix X (which contains missing data).

    Parameters
    ----------
    X : array-like shape=(n_times, n_samples, n_variables)
        Data matrix (missing entries as NaN).
    alpha : float, optional
        Regularisation parameter.
    rho : float, optional
        Augmented Lagrangian parameter.
    kernel: array-like shape(n_times, n_times)
        The kernel to use to enforce similarities among times.
    psi: string, default='laplacian'
        Type of consistency between networks. Option are "l1", "l2", "linf",
        "laplacian", "l12"
    over_relax : float, optional
        Over-relaxation parameter (typically between 1.0 and 1.8).
        NOTE(review): currently not forwarded to the inner solver — confirm.
    max_iter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Absolute tolerance for convergence.
    rtol : float, optional
        Relative tolerance for convergence.
        NOTE(review): the inner solver below is called with ``rtol=tol``,
        so this parameter is effectively ignored — confirm intended.
    return_history : bool, optional
        Return the history of computed values.
    return_n_iter : bool, optional
        Return the number of iteration before convergence.
    verbose : bool, default False
        Print info at each iteration.
    update_rho_options : dict, optional
        Arguments for the rho update.
        See regain.update_rules.update_rho function for more information.
    compute_objective : bool, default True
        Choose to compute the objective value.

    Returns
    -------
    K : numpy.array, 3-dimensional
        Precision matrices, one per time point.
    S : np.array, 3 dimensional
        Final empirical covariance matrices.
    X : numpy.array
        Data matrix with missing entries imputed.
    n_iter : int
        If return_n_iter, returns the number of iterations before convergence.
    history : list
        If return_history, then also a structure that contains the
        objective value, the primal and dual residual norms, and tolerances
        for the primal and dual residual norms at each iteration.
    """
    n_times, n_samples, d = X.shape
    K = np.zeros((n_times, d, d))
    means = np.zeros((n_times, d))
    loglik = -np.inf
    checks = []
    for iter_ in range(max_iter):
        old_logl = loglik
        # E-step: per time point, conditional expectations of the missing
        # entries (cs), updated means, and the completed empirical covariance.
        cs = np.array([_compute_cs(means[t, :], K[t, :, :], X[t, :, :])
                       for t in range(n_times)])
        means = np.array([_compute_mean(X[t, :, :], cs[t, :, :])
                          for t in range(n_times)])
        emp_cov = np.array([
            _compute_empirical_covariance(X[t, :, :], K[t, :, :],
                                          cs[t, :, :])
            for t in range(n_times)
        ])
        # M-step: time-coupled graphical lasso on the completed covariances.
        K = kernel_time_graphical_lasso(
            emp_cov, alpha=alpha, rho=rho, kernel=kernel,
            max_iter=max_iter, verbose=max(0, verbose-1),
            psi=psi, tol=tol, rtol=tol,
            return_history=False, return_n_iter=True, mode='admm',
            update_rho_options=None, compute_objective=False, stop_at=None,
            stop_when=1e-4, init='empirical')[0]
        loglik = loss(emp_cov, K)
        # `loss` is a negative log-likelihood, so diff > 0 means improvement.
        diff = old_logl - loglik
        checks.append(dict(iteration=iter_,
                           log_likelihood=loglik,
                           difference=diff))
        if verbose:
            print("Iter %d: log-likelihood %.4f, difference: %.4f" % (
                iter_, loglik, diff))
        if iter_ > 1 and diff < tol:
            break
    else:
        # for/else: only reached when the loop exhausted max_iter without break.
        warnings.warn("The Missing Graphical Lasso algorithm did not converge")
    # Impute: replace NaNs with 0 and add the conditional expectations cs
    # (which are zero on observed entries).
    aux = np.nan_to_num(np.copy(X))
    aux += cs
    return_list = [K, emp_cov, aux]
    if return_history:
        return_list.append(checks)
    if return_n_iter:
        return_list.append(iter_)
    return return_list
def objective(K, S, n_samples, alpha, beta, psi):
    """Penalised negative log-likelihood: loss + weighted od-l1 term on each
    precision matrix + psi-penalty on consecutive differences in time."""
    total = loss(S, K, n_samples=n_samples)
    total += sum(l1_norm(weighted) for weighted in alpha * K)
    total += beta * sum(psi(delta) for delta in K[1:] - K[:-1])
    return total
def latent_missing_time_graphical_lasso(
        emp_cov, h=2, alpha=0.01, M=None, mu=0, eta=0, beta=1., kernel=None,
        psi="laplacian", strong_M=False,
        n_samples=None, assume_centered=False, tol=1e-3, rtol=1e-3,
        max_iter=200, verbose=0, rho=1., compute_objective=False,
        return_history=False, return_n_iter=False):
    """EM solver for a time-varying graphical lasso with h latent variables.

    Alternates an expectation step (complete the (h+o)x(h+o) covariances given
    the current precisions) and a maximisation step (kernel time graphical
    lasso on the completed covariances).

    Parameters (in addition to those shared with missing_time_graphical_lasso)
    ----------
    emp_cov : array-like, shape (n_times, o, o)
        Empirical covariances of the observed variables, one per time.
    h : int
        Number of latent variables (overridden by M's shape when M is given).
    M : array-like, shape (o, h), optional
        Prior links between observed and latent variables.
    mu, eta : floats
        Regularisation on the observed-latent block / latent block.
    strong_M : bool
        If True, the observed-latent block of each precision is fixed to M.

    Returns
    -------
    list : [Ks, likelihoods] (+ n_iter when return_n_iter is True).
    NOTE(review): `return_history` is accepted but never used — the `checks`
    list is not returned; confirm whether that is intended.
    """
    psi_func, _, _ = check_norm_prox(psi)
    if M is None:
        M = np.zeros((emp_cov[0].shape[0], h))
    else:
        h = M.shape[1]
    o = emp_cov[0].shape[0]
    # Random symmetric PSD initialisation of the (h+o)-dim precisions,
    # rescaled by each matrix's maximum entry.
    Ks = [np.random.randn(h+o, h+o)*1.5 for i in range(emp_cov.shape[0])]
    Ks = [K.dot(K.T) for K in Ks]
    Ks = [K / np.max(K) for K in Ks]
    Ks = np.array(Ks)
    if strong_M:
        for i in range(Ks.shape[0]):
            Ks[i, :h, h:] = M.T
            Ks[i, h:, :h] = M
    # Entry-wise regularisation weights: eta on the latent block, alpha on the
    # observed block, mu*M on the cross blocks (0 when the cross block is fixed).
    regularizer = np.ones((h+o, h+o))
    regularizer -= np.diag(np.diag(regularizer))
    regularizer[:h, :h] *= eta
    regularizer[h:, h:] *= alpha
    if strong_M:
        regularizer[:h, h:] = 0
        regularizer[h:, :h] = 0
    else:
        regularizer[:h, h:] = mu*M.T
        regularizer[h:, :h] = mu*M
    penalized_nll = np.inf
    checks = []
    Ss = np.zeros((emp_cov.shape[0], h+o, h+o))
    Ks_prev = None
    likelihoods = []
    for iter_ in range(max_iter):
        # expectation step
        Ks_prev = Ks.copy()
        Ss_ = []
        for i, K in enumerate(Ks):
            if strong_M:
                K[:h, h:] = M.T
                K[h:, :h] = M
            # Completed covariance: latent block from the current precision,
            # cross block via the latent-block pseudo-inverse, observed block
            # from the empirical covariance.
            S = np.zeros_like(K)
            K_inv = np.linalg.pinv(K[:h, :h])
            S[:h, :h] = K_inv + K_inv.dot(K[:h, h:]).dot(
                emp_cov[i]).dot(K[h:, :h]).dot(K_inv)
            S[:h, h:] = K_inv.dot(K[:h, h:].dot(emp_cov[i]))
            S[h:, :h] = S[:h, h:].T
            S[h:, h:] = emp_cov[i]
            Ss_.append(S)
        Ss = np.array(Ss_)
        # maximisation step: time-coupled graphical lasso on completed covariances.
        # NOTE(review): called with rtol=tol, so the `rtol` argument of this
        # function is effectively ignored — confirm intended.
        Ks = kernel_time_graphical_lasso(
            Ss, alpha=alpha, rho=rho, kernel=kernel,
            max_iter=max_iter, verbose=max(0, verbose-1),
            psi=psi, tol=tol, rtol=tol,
            return_history=False, return_n_iter=True, mode='admm',
            update_rho_options=None, compute_objective=False, stop_at=None,
            stop_when=1e-4, init='empirical')[0]
        penalized_nll_old = penalized_nll
        penalized_nll = objective(Ks, Ss, n_samples, regularizer, beta,
                                  psi_func)
        # NOTE(review): rnorm uses `K`, i.e. the *last* matrix from the
        # E-step loop above rather than the updated Ks — confirm intended.
        check = convergence(obj=penalized_nll, rnorm=np.linalg.norm(K),
                            snorm=penalized_nll_old - penalized_nll,
                            e_pri=None, e_dual=None)
        checks.append(check)
        # Marginal precision over the observed block (Schur complement).
        thetas = [k[h:, h:] -
                  k[h:, :h].dot(np.linalg.pinv(k[:h, :h])).dot(k[:h, h:])
                  for k in Ks]
        likelihoods.append(log_likelihood_t(emp_cov, thetas))
        if verbose:
            print("iter: %d, NLL: %.6f , NLL_diff: %.6f" %
                  (iter_, check[0], check[2]))
        if iter_ > 2:
            if np.abs(check[2]) < tol:
                break
            # Roll back one EM step if the objective started increasing.
            if check[2] < 0 and checks[-2][2] > 0:
                Ks = Ks_prev
                break
    else:
        # for/else: reached only when max_iter was exhausted without break.
        warnings.warn("The optimization of EM did not converged.")
    returns = [Ks, likelihoods]
    if return_n_iter:
        returns.append(iter_)
    return returns
class TwoLayersTimeGraphicalLasso(LatentMissingGraphicalLasso,
                                  KernelTimeGraphicalLasso):
    """Graphical Lasso with missing data as latent variables.

    This method allows for graphical model selection in presence of missing
    data in the dataset. It is suitable to estimate latent variables samples.

    For references see:
    "Yuan, Ming. Discussion: Latent variable graphical model selection via
    convex optimization. Ann. Statist. 40 (2012), no. 4, 1968--1972."
    "Tozzo, Veronica, et al. "Group induced graphical lasso allows for
    discovery of molecular pathways-pathways interactions." arXiv preprint
    arXiv:1811.09673 (2018)."

    Parameters
    ----------
    mask: array-like, shape=(n_dim_obs, n_dim_lat)
        Prior knowledge to put on the connections between latent and observed
        variables. If mask is a matrix of zeros the algorithm corresponds to
        the one of Yuan et al.(2012), in the other case to Tozzo et al.(2018).
    alpha : positive float, default 0.01
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
    kernel : ndarray, default None
        Normalised temporal kernel (1 on the diagonal),
        with dimensions equal to the dimensionality of the data set.
        If None, it is interpreted as an identity matrix, where there is no
        constraint on the temporal behaviour of the precision matrices.
    mu : positive float, default 0.01
        The regularization parameter on the inter-links: the higher mu, the
        more the final matrix will have links similar to the ones in mask.
    eta : positive float, default 0.1
        The regularization parameter on the latent variables: the higher eta,
        the sparser the network on the latent variables.
    rho : positive float, default 1
        Augmented Lagrangian parameter.
    over_relax : positive float, default 1
        Over-relaxation parameter (typically between 1.0 and 1.8).
    tol : positive float, default 1e-4
        Absolute tolerance to declare convergence.
    rtol : positive float, default 1e-4
        Relative tolerance to declare convergence.
    max_iter : integer, default 100
        The maximum number of iterations.
    verbose : boolean, default False
        If verbose is True, the objective function, rnorm and snorm are
        printed at each iteration.
    assume_centered : boolean, default False
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.
    update_rho_options : dict, default None
        Options for the update of rho. See `update_rho` function for details.
    compute_objective : boolean, default True
        Choose if compute the objective function during iterations
        (only useful if `verbose=True`).

    Attributes
    ----------
    covariance_ : array-like, shape (n_features, n_features)
        Estimated covariance matrix
    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.
    n_iter_ : int
        Number of iterations run.
    """

    def __init__(self, h=2, mask=None, alpha=0.1, mu=0, eta=0, beta=1.,
                 rho=1., psi='laplacian', n_samples=None, tol=1e-4, rtol=1e-4,
                 max_iter=100, verbose=0, kernel=None,
                 update_rho=False, random_state=None,
                 score_type='likelihood',
                 assume_centered=False,
                 compute_objective=True):
        # NOTE(review): `n_samples` and `update_rho` are accepted but not
        # stored or forwarded here — confirm intended.
        LatentMissingGraphicalLasso.__init__(self, mask, mu=mu,
                                             eta=eta,
                                             random_state=random_state)
        KernelTimeGraphicalLasso.__init__(
            self, alpha=alpha, beta=beta, rho=rho,
            tol=tol, rtol=rtol, psi=psi, kernel=kernel,
            max_iter=max_iter,
            assume_centered=assume_centered)
        self.score_type = score_type
        self.h = h
        self.verbose = verbose
        self.compute_objective = compute_objective

    def fit(self, X, y):
        """Fit the TwoLayersTimeGraphicalLasso model to X.

        Parameters
        ----------
        X : ndarray, shape = (n_samples * n_times, n_dimensions)
            Data matrix.
        y : ndarray, shape = (n_times,)
            Indicate the temporal belonging of each sample.
        """
        # Covariance does not make sense for a single feature
        X, y = check_X_y(X, y, accept_sparse=False, dtype=np.float64,
                         order="C", ensure_min_features=2, estimator=self)
        n_dimensions = X.shape[1]
        self.classes_, n_samples = np.unique(y, return_counts=True)
        n_times = self.classes_.size
        # Per-time means (zeros when centering is assumed).
        if self.assume_centered:
            self.location_ = np.zeros((n_times, n_dimensions))
        else:
            self.location_ = np.array(
                [X[y == cl].mean(0) for cl in self.classes_])
        # One empirical covariance per time point.
        emp_cov = np.array([empirical_covariance(X[y == cl],
                            assume_centered=self.assume_centered)
                            for cl in self.classes_])
        # Solver returns [Ks, likelihoods, n_iter]; likelihoods are discarded.
        self.precision_, _, self.n_iter_ = latent_missing_time_graphical_lasso(
            emp_cov, h=self.h, alpha=self.alpha, M=self.mask, mu=self.mu,
            eta=self.eta, beta=self.beta, psi=self.psi, kernel=self.kernel,
            assume_centered=self.assume_centered,
            tol=self.tol, rtol=self.rtol,
            max_iter=self.max_iter, verbose=self.verbose, rho=self.rho,
            compute_objective=self.compute_objective,
            return_history=True,
            return_n_iter=True)
        return self

    def get_observed_precision(self):
        """Marginal precision over the observed variables, per time point
        (Schur complement of the latent block).

        NOTE(review): relies on `self.n_latent_`, presumably set by
        LatentMissingGraphicalLasso from the mask — confirm.
        """
        precision = []
        for p in self.precision_:
            obs = p[self.n_latent_:, self.n_latent_:]
            lat = p[:self.n_latent_, :self.n_latent_]
            inter = p[:self.n_latent_, self.n_latent_:]
            precision.append(obs - inter.T.dot(np.linalg.pinv(lat)).dot(inter))
        return np.array(precision)

    def score(self, X, y):
        """Score the fitted model on (X, y) with the configured score_type
        ('likelihood', 'bic', 'ebic' or 'ebicm'); falls back to likelihood."""
        n = X.shape[0]
        # Covariances of the centered data, per time class.
        emp_cov = [empirical_covariance(X[y == cl] - self.location_[i],
                   assume_centered=True) for i, cl in enumerate(self.classes_)]
        score_func = {'likelihood': log_likelihood_t,
                      'bic': BIC_t,
                      'ebic': partial(EBIC_t, n=n),
                      'ebicm': partial(EBIC_m_t, n=n)}
        try:
            score_func = score_func[self.score_type]
        except KeyError:
            warnings.warn("The score type passed is not available, using log "
                          "likelihood.")
            score_func = log_likelihood_t
        precision = self.get_observed_precision()
        # Scores assume positive definite precisions; project if needed.
        if not positive_definite(precision):
            ensure_posdef(precision)
        precision = [p for p in precision]
        s = score_func(emp_cov, precision)
        return s
class MissingTimeGraphicalLasso(KernelTimeGraphicalLasso):
    """Time-Varying Graphical Lasso with missing data.

    This method allows for graphical model selection in presence of missing
    data in the dataset. It is suitable to perform imputing after fitting.

    Parameters
    ----------
    alpha : positive float, default 0.01
        Regularization parameter for precision matrix. The higher alpha,
        the more regularization, the sparser the inverse covariance.
    kernel : ndarray, default None
        Normalised temporal kernel (1 on the diagonal),
        with dimensions equal to the dimensionality of the data set.
        If None, it is interpreted as an identity matrix, where there is no
        constraint on the temporal behaviour of the precision matrices.
    psi : {'laplacian', 'l1', 'l2', 'linf', 'node'}, default 'laplacian'
        Type of norm to enforce for consecutive precision matrices in time.
    rho : positive float, default 1
        Augmented Lagrangian parameter.
    tol : positive float, default 1e-4
        Absolute tolerance to declare convergence.
    rtol : positive float, default 1e-4
        Relative tolerance to declare convergence.
    max_iter : integer, default 100
        The maximum number of iterations.
    verbose : boolean, default False
        If verbose is True, the objective function, rnorm and snorm are
        printed at each iteration.
    update_rho_options : dict, default None
        Options for the update of rho. See `update_rho` function for details.
    compute_objective : boolean, default True
        Choose if compute the objective function during iterations
        (only useful if `verbose=True`).

    Attributes
    ----------
    covariance_ : array-like, shape (n_times, n_features, n_features)
        Estimated covariance matrix
    precision_ : array-like, shape (n_times, n_features, n_features)
        Estimated pseudo inverse matrix.
    n_iter_ : int
        Number of iterations run.
    """

    def __init__(
            self, alpha=0.01, kernel=None, rho=1., tol=1e-4, rtol=1e-4,
            psi='laplacian', max_iter=100, verbose=False,
            return_history=False,
            update_rho_options=None, compute_objective=True, ker_param=1,
            max_iter_ext=100):
        # NOTE(review): `return_history`, `ker_param` and `max_iter_ext` are
        # accepted but neither stored nor forwarded — confirm intended.
        super(MissingTimeGraphicalLasso, self).__init__(
            alpha=alpha, tol=tol, max_iter=max_iter, verbose=verbose,
            assume_centered=False, rho=rho,
            rtol=rtol, kernel=kernel, psi=psi,
            update_rho_options=update_rho_options,
            compute_objective=compute_objective)

    def fit(self, X, y):
        """Fit the MissingTimeGraphicalLasso model to X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data from which to compute the covariance estimate
            (may contain NaN for missing entries).
        y : ndarray, shape (n_samples, 1)
            Division in times.
        """
        # allow-nan: missing entries are handled by the EM solver below.
        X, y = check_X_y(
            X, y, accept_sparse=False, dtype=np.float64, order="C",
            ensure_min_features=2, estimator=self,
            force_all_finite='allow-nan')
        self.classes_, n_samples = np.unique(y, return_counts=True)
        # Regroup samples by time point: shape (n_times, n_samples_t, n_features).
        X = np.array([X[y == cl] for cl in self.classes_])
        # Solver returns [K, emp_cov, imputed_X, n_iter] with these flags.
        self.precision_, self.covariance_, self.complete_data_matrix_, \
            self.n_iter_ = missing_time_graphical_lasso(
                X, alpha=self.alpha, tol=self.tol,
                max_iter=self.max_iter,
                verbose=self.verbose, rho=self.rho,
                rtol=self.rtol, kernel=self.kernel,
                psi=self.psi, return_n_iter=True,
                update_rho_options=self.update_rho_options,
                compute_objective=self.compute_objective)
        return self
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-20 20:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a non-null `turbine` counter to the `infrastructure` lookup table."""

    dependencies = [
        ('lookup_tables', '0009_infrastructure_elevation'),
    ]

    operations = [
        migrations.AddField(
            model_name='infrastructure',
            name='turbine',
            field=models.PositiveIntegerField(default=0),
            # default=0 was supplied only to back-fill existing rows;
            # preserve_default=False drops it from the model state afterwards.
            preserve_default=False,
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.