content stringlengths 5 1.05M |
|---|
"""
Manual Captcha Harvester
Made by @CrepChef
"""
from utils import Logger
from flask import Flask, request, jsonify, render_template, redirect
import logging
import threading
from datetime import datetime
from time import sleep
import webbrowser
import json
# FIFO queue of harvested reCAPTCHA tokens; each entry is a dict
# {'token': str, 'expiry': float-unix-timestamp}.
tokens = []
# Project logger (from utils); provides log/success/error/warn/status methods.
logger = Logger()
def manageTokens():
    """Background reaper: delete harvested tokens once their expiry passes.

    Runs forever; intended to be started on a (daemon) thread. Collects
    expired entries first and removes them afterwards — the original code
    called ``tokens.remove`` inside ``for token in tokens``, which mutates
    the list while iterating and silently skips the element that follows
    each removal.
    """
    while True:
        now = datetime.now().timestamp()
        # Snapshot the expired entries, then remove them from the shared list.
        expired = [t for t in tokens if t['expiry'] < now]
        for t in expired:
            tokens.remove(t)
            logger.error("Token expired and deleted")
        sleep(5)
def sendToken():
    """Block until a token is available, then pop and return the oldest one.

    Returns:
        str: the raw g-recaptcha-response token.

    Sleeps briefly between polls instead of spinning in a tight
    ``while not tokens: pass`` loop, which pinned a CPU core while waiting.
    """
    while not tokens:
        sleep(0.05)
    token = tokens.pop(0)
    return token['token']
# Flask app serving the harvester page and its JSON API.
app = Flask(__name__)
# Silence werkzeug's per-request access log; only errors are shown.
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
@app.route('/')
def index():
    """Serve the harvester page, configured with the target sitekey/domain."""
    sitekey = config['sitekey']
    domain = config['domain']
    return render_template('index.html', sitekey=sitekey, domain=domain)
@app.route('/api/submit', methods=['POST'])
def submit():
    """Receive a solved reCAPTCHA token from the page and enqueue it.

    reCAPTCHA tokens live ~2 minutes; 115 s leaves a safety margin before
    Google rejects them.
    """
    try:
        token = request.form['g-recaptcha-response']
    except KeyError:
        # A missing form field is the only expected failure here; the bare
        # ``except`` this replaces also swallowed programming errors.
        return jsonify({
            'success': False,
            'error': 'Missing g-recaptcha-response field',
            'result': None
        })
    tokenDict = {
        'token': token,
        'expiry': datetime.now().timestamp() + 115
    }
    tokens.append(tokenDict)
    logger.success("Token harvested and stored")
    return jsonify({
        'success': True,
        'error': None,
        'result': 'Token harvested and stored'
    })
@app.route('/api/count')
def api_count():
    """Report how many harvested tokens are currently queued."""
    count = len(tokens)
    return jsonify({'success': True, 'error': None, 'result': count})
@app.route('/api/token')
def api_fetch_token():
    """Pop and return the oldest harvested token, or an error when empty.

    The success payload now uses the ``result`` key like every other
    endpoint in this file — it previously returned ``results``, breaking
    any consumer that reads ``result``.
    """
    try:
        token = tokens.pop(0)
    except IndexError:
        # IndexError (empty queue) is the only expected failure; the bare
        # ``except`` this replaces also hid programming errors.
        logger.warn("Token requested but none available")
        return jsonify({
            'success': False,
            'error': 'Token requested but none available',
            'result': None
        })
    logger.status("Token requested and returned to user")
    return jsonify({
        'success': True,
        'error': None,
        'result': token['token']
    })
if __name__ == '__main__':
    # Load config first so the routes (which read ``config``) cannot race
    # against startup.
    with open('config.json') as file:
        config = json.load(file)
    # Daemon thread so the token reaper does not keep the process alive
    # after the Flask server stops (the original non-daemon thread did).
    threading.Thread(target=manageTokens, daemon=True).start()
    logger.log("*****************************************************")
    logger.log("Manual Captcha Harvester | CrepChef")
    logger.log("*****************************************************")
    logger.log("Server running at harvester.{}:5000".format(config['domain']))
    webbrowser.open('http://harvester.{}:5000/'.format(config['domain']))
    app.run()
"""
======================================
Thresholding an Image with RangeSlider
======================================
Using the RangeSlider widget to control the thresholding of an image.
The RangeSlider widget can be used similarly to the `.widgets.Slider`
widget. The major difference is that RangeSlider's ``val`` attribute
is a tuple of floats ``(lower val, upper val)`` rather than a single float.
See :doc:`/gallery/widgets/slider_demo` for an example of using
a ``Slider`` to control a single float.
See :doc:`/gallery/widgets/slider_snap_demo` for an example of having
the ``Slider`` snap to discrete values.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import RangeSlider
# generate a fake image (fixed seed so the demo is reproducible)
np.random.seed(19680801)
N = 128
img = np.random.randn(N, N)

# Left axes: the image; right axes: histogram of its pixel intensities.
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
plt.subplots_adjust(bottom=0.25)  # leave room for the slider under the axes
im = axs[0].imshow(img)
axs[1].hist(img.flatten(), bins='auto')
axs[1].set_title('Histogram of pixel intensities')

# Create the RangeSlider spanning the full intensity range of the image
slider_ax = plt.axes([0.20, 0.1, 0.60, 0.03])
slider = RangeSlider(slider_ax, "Threshold", img.min(), img.max())

# Create the vertical lines on the histogram marking the slider limits
lower_limit_line = axs[1].axvline(slider.val[0], color='k')
upper_limit_line = axs[1].axvline(slider.val[1], color='k')
def update(val):
    """RangeSlider callback: clamp the image colormap to the selected
    (lower, upper) range and move the histogram's limit markers."""
    lo, hi = val
    # Update the image's colormap limits
    im.norm.vmin = lo
    im.norm.vmax = hi
    # Reposition the vertical limit lines on the histogram
    lower_limit_line.set_xdata([lo, lo])
    upper_limit_line.set_xdata([hi, hi])
    # Request a redraw so the changes become visible
    fig.canvas.draw_idle()


slider.on_changed(update)
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.widgets.RangeSlider`
|
# apis_v1/documentation_source/position_list_for_voter_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def position_list_for_voter_doc_template_values(url_root):
    """
    Show documentation about positionListForVoter

    Returns the dict of template values used to render the API-docs page
    for the positionListForVoter endpoint.
    """
    # Parameters the endpoint requires on every call.
    required_query_parameter_list = [
        {
            'name': 'voter_device_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'An 88 character unique identifier linked to a voter record on the server',
        },
        {
            'name': 'api_key',
            'value': 'string (from post, cookie, or get (in that order))',  # boolean, integer, long, string
            'description': 'The unique key provided to any organization using the WeVoteServer APIs',
        },
    ]
    # Optional filters a caller may supply to narrow the returned positions.
    optional_query_parameter_list = [
        {
            'name': 'stance',
            'value': 'string',  # boolean, integer, long, string
            'description': 'Default is ANY_STANCE. '
                           'Other options include SUPPORT, STILL_DECIDING, INFO_ONLY, NO_STANCE, OPPOSE, '
                           'PERCENT_RATING',
        },
        {
            'name': 'friends_vs_public',
            'value': 'string',  # boolean, integer, long, string
            'description': 'Default is FRIENDS_AND_PUBLIC. '
                           'Other options include FRIENDS_ONLY, PUBLIC_ONLY, FRIENDS_AND_PUBLIC',
        },
        {
            'name': 'google_civic_election_id',
            'value': 'integer',  # boolean, integer, long, string
            'description': 'The unique identifier for a particular election. If not provided, return all positions'
                           ' for this organization. If this variable is included, state_code will be ignored.',
        },
        {
            'name': 'state_code',
            'value': 'string',  # boolean, integer, long, string
            'description': 'The us state we want ballot item positions for. '
        },
        {
            'name': 'show_only_this_election',
            'value': 'boolean',  # boolean, integer, long, string
            'description': 'The default is \'True\'. Only show positions about things on the current ballot. ',
        },
        {
            'name': 'show_all_other_elections',
            'value': 'boolean',  # boolean, integer, long, string
            'description': 'The default is \'False\'. Show the positions for this ballot item that are NOT on this '
                           'voter\'s ballot.',
        },
    ]
    # Status strings the endpoint can return, rendered in the docs table.
    potential_status_codes_list = [
        {
            'code': 'VALID_VOTER_DEVICE_ID_MISSING',
            'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
        },
        {
            'code': 'VALID_VOTER_ID_MISSING',
            'description': 'Cannot proceed. A valid voter_id was not found.',
        },
        {
            'code': 'UNABLE_TO_RETRIEVE-CANDIDATE_ID_AND_MEASURE_ID_MISSING',
            'description': 'Cannot proceed. Neither candidate_id nor measure_id were included.',
        },
        {
            'code': 'SUCCESSFUL_RETRIEVE_OF_POSITIONS',
            'description': 'The number of opposes for this ballot item was retrieved.',
        },
        {
            'code': 'SUCCESSFUL_RETRIEVE_OF_POSITIONS_NOT_FOLLOWED',
            'description': 'The number of organizations that oppose this ballot item that voter is NOT following.',
        },
    ]
    # Pre-filled variables for the "try now" link on the docs page.
    try_now_link_variables_dict = {
        'stance': 'ANY_STANCE',
        'friends_vs_public': 'FRIENDS_AND_PUBLIC',
    }
    # Example response body rendered verbatim on the docs page.
    # NOTE(review): this sample is illustrative, not strictly valid JSON —
    # e.g. the '"position_list": list' line lacks a trailing comma and the
    # two 'show_*' lines sit between it and the list body; confirm before
    # tightening.
    api_response = '{\n' \
                   ' "status": string,\n' \
                   ' "success": boolean,\n' \
                   ' "count": integer,\n' \
                   ' "friends_vs_public": string ' \
                   ' (One of these: \'FRIENDS_ONLY\', \'PUBLIC_ONLY\', \'FRIENDS_AND_PUBLIC\'),\n' \
                   ' "voter_we_vote_id": string,\n' \
                   ' "voter_display_name": string,\n' \
                   ' "voter_image_url_https_large": string,\n' \
                   ' "voter_image_url_https_medium": string,\n' \
                   ' "voter_image_url_https_tiny": string,\n' \
                   ' "google_civic_election_id": integer,\n' \
                   ' "state_code": string,\n' \
                   ' "position_list": list\n' \
                   ' "show_only_this_election": boolean (True if only returning positions for voter\'s ballot),\n' \
                   ' "show_all_other_elections": boolean (True if returning positions NOT on voter\'s ballot,\n' \
                   ' [\n' \
                   '   "position_we_vote_id": string,\n' \
                   '   "ballot_item_display_name": string (either measure name or candidate name),\n' \
                   '   "ballot_item_id": integer,\n' \
                   '   "ballot_item_image_url_https_large": string,\n' \
                   '   "ballot_item_image_url_https_medium": string,\n' \
                   '   "ballot_item_image_url_https_tiny": string,\n' \
                   '   "ballot_item_twitter_handle": string,\n' \
                   '   "ballot_item_we_vote_id": string,\n' \
                   '   "ballot_item_political_party": string,\n' \
                   '   "ballot_item_state_code": string,\n' \
                   '   "contest_office_id": integer,\n' \
                   '   "contest_office_we_vote_id": string,\n' \
                   '   "contest_office_name": string (The name of the office if kind_of_ballot_item is CANDIDATE),\n' \
                   '   "kind_of_ballot_item": string, ' \
                   ' (One of these: \'CANDIDATE\', \'MEASURE\', \'OFFICE\', \'UNKNOWN\')\n' \
                   '   "is_support": boolean,\n' \
                   '   "is_positive_rating": boolean,\n' \
                   '   "is_support_or_positive_rating": boolean,\n' \
                   '   "is_oppose": boolean,\n' \
                   '   "is_negative_rating": boolean,\n' \
                   '   "is_oppose_or_negative_rating": boolean,\n' \
                   '   "is_information_only": boolean,\n' \
                   '   "more_info_url": string,\n' \
                   '   "statement_text": string,\n' \
                   '   "statement_html": string,\n' \
                   '   "google_civic_election_id": integer,\n' \
                   '   "last_updated": string,\n' \
                   ' ],\n' \
                   '}'
    # Bundle everything the docs template needs.
    template_values = {
        'api_name': 'positionListForVoter',
        'api_slug': 'positionListForVoter',
        'api_introduction':
            "A list of all positions (support/oppose/info) held by this voter. ",
        'try_now_link': 'apis_v1:positionListForVoterView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes':
            "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values
|
# Copyright 2014 OpenCore LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import yaml
class DefaultResolver(object):
    """Maps question answers onto payload parameter values.

    ``resolve`` turns a list of answered questions into a
    ``{param: answer}`` mapping; ``replace`` substitutes those values into
    a payload's backend storage/compute ``instances`` fields.
    """

    def resolve(self, questions):
        """Return a dict mapping each question's ``param`` to its ``_answer``.

        Args:
            questions: iterable of dicts with 'param' and '_answer' keys.
        """
        return {q['param']: q['_answer'] for q in questions}

    def replace(self, payload, values):
        """Substitute resolved values into *payload* in place and return it.

        For every backend, if the storage ``instances`` field (or any
        compute ``instances`` field) names a key in *values*, it is
        replaced with the corresponding value.
        """
        for backend in payload['backend']:
            # Check the storage. ``in values`` instead of ``in values.keys()``
            # — same membership test, without building a keys view.
            storage = backend['storage']
            if storage['instances'] in values:
                storage['instances'] = values[storage['instances']]
            # Check the compute (optional section).
            for compute in backend.get('compute', []):
                if compute['instances'] in values:
                    compute['instances'] = values[compute['instances']]
        return payload
|
#!/usr/bin/env python3
import argparse
import operator
import threading
import numpy as np
from time import sleep
import cv2
import depthai as dai
import socket
from common.config import NN_IMG_SIZE
from common.cscore_stream import CsCoreStream
from pipelines import goal_edge_depth_detection, object_edge_detection
import logging
from common import target_finder
from common.mjpeg_stream import MjpegStream
from networktables.util import NetworkTables
from common.utils import FPSHandler
# Command-line flags: -d runs the pipeline with on-screen debug windows.
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='debug', action="store_true", default=False, help='Start in Debug Mode')
args = parser.parse_args()
# Module-level logger for this host process.
log = logging.getLogger(__name__)
class Main:
    """Vision host: runs goal detection (OAK-D) and intake detection (OAK-1),
    publishes targeting data to NetworkTables and serves MJPEG streams."""

    def __init__(self):
        log.info("Connected Devices:")
        for device in dai.Device.getAllAvailableDevices():
            log.info(f"{device.getMxId()} {device.state}")
        self.init_networktables()
        try:
            # UDP "connect" to a public resolver just to discover this
            # host's LAN IP; no packet is actually sent. ``except OSError``
            # instead of a bare except so KeyboardInterrupt etc. are not
            # swallowed here.
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(("8.8.8.8", 80))
            ip_address = s.getsockname()[0]
        except OSError:
            ip_address = 'localhost'
        port1 = 5801
        port2 = 5802
        # Per-camera bookkeeping: device id, FPS counter, MJPEG stream
        # address, and the NetworkTables table results are published to.
        self.device_list = {"OAK-D_Goal": {
            'name': "OAK-D_Goal",
            'id': "14442C1091398FD000",
            # 'id': "14442C10218CCCD200",
            'fps_handler': FPSHandler(),
            'stream_address': "{}:{}".format(ip_address, port1),
            'nt_tab': NetworkTables.getTable("OAK-D_Goal")
        }, "OAK-1_Intake": {
            'name': "OAK-1_Intake",
            'id': "14442C1011043ED700",
            # 'id': "14442C10C14F47D700",
            'fps_handler': FPSHandler(),
            'stream_address': "{}:{}".format(ip_address, port2),
            'nt_tab': NetworkTables.getTable("OAK-1_Intake")
        }}
        self.goal_pipeline, self.goal_labels = goal_edge_depth_detection.create_pipeline("infiniteRecharge2021")
        self.intake_pipeline, self.intake_labels = object_edge_detection.create_pipeline("infiniteRecharge2021")
        self.oak_d_stream = MjpegStream(IP_ADDRESS=ip_address, HTTP_PORT=port1, colorspace='BW', QUALITY=10)
        self.oak_1_stream = MjpegStream(IP_ADDRESS=ip_address, HTTP_PORT=port2, colorspace='BW', QUALITY=10)
        # self.oak_d_stream = CsCoreStream(IP_ADDRESS=ip_address, HTTP_PORT=port1, colorspace='BW', QUALITY=10)
        # self.oak_1_stream = CsCoreStream(IP_ADDRESS=ip_address, HTTP_PORT=port2, colorspace='BW', QUALITY=10)

    def parse_goal_frame(self, frame, edgeFrame, bboxes):
        """Locate the goal target in a detection frame, publish tv/tx/tz to
        NetworkTables, annotate the edge frame, and stream it."""
        # Close small gaps in the edge image before contour finding.
        kernel = np.ones((3, 3), np.uint8)
        edgeFrame = cv2.morphologyEx(edgeFrame, cv2.MORPH_CLOSE, kernel, iterations=1)
        # edgeFrame = cv2.threshold(edgeFrame, 20, 255, cv2.THRESH_TOZERO)[1]
        valid_labels = ['red_upper_power_port', 'blue_upper_power_port']
        nt_tab = self.device_list['OAK-D_Goal']['nt_tab']
        if len(bboxes) == 0:
            nt_tab.putString("target_label", "None")
            nt_tab.putNumber("tv", 0)
        else:
            for bbox in bboxes:
                target_label = self.goal_labels[bbox['label']]
                if target_label not in valid_labels:
                    continue
                edgeFrame, target_x, target_y = target_finder.find_largest_hexagon_contour(edgeFrame, bbox)
                # (-999, -999) is the helper's "no contour found" sentinel.
                if target_x == -999 or target_y == -999:
                    log.error("Error: Could not find target contour")
                    continue
                # Pixel offset from image center scaled to degrees; the
                # constant is presumably the horizontal FOV mapping for a
                # 1920 px frame — TODO confirm against camera calibration.
                angle_offset = (target_x - (NN_IMG_SIZE / 2.0)) * 68.7938003540039 / 1920
                if abs(angle_offset) > 30:
                    log.info("Invalid angle offset. Setting it to 0")
                    nt_tab.putNumber("tv", 0)
                    angle_offset = 0
                else:
                    log.info("Found target '{}'\tX Angle Offset: {}".format(target_label, angle_offset))
                    nt_tab.putNumber("tv", 1)
                nt_tab.putString("target_label", target_label)
                nt_tab.putNumber("tx", angle_offset)
                nt_tab.putNumber("tz", bbox['depth_z'])
                cv2.rectangle(edgeFrame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']),
                              (255, 255, 255), 2)
                cv2.circle(edgeFrame, (int(round(target_x, 0)), int(round(target_y, 0))), radius=5, color=(128, 128, 128),
                           thickness=-1)
                bbox['target_x'] = target_x
                bbox['target_y'] = target_y
                bbox['angle_offset'] = angle_offset
        fps = self.device_list['OAK-D_Goal']['fps_handler']
        fps.next_iter()
        cv2.putText(edgeFrame, "{:.2f}".format(fps.fps()), (0, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
        self.oak_d_stream.send_frame(edgeFrame)
        return frame, edgeFrame, bboxes

    def parse_intake_frame(self, frame, edgeFrame, bboxes):
        """Detect power cells, publish tv/ta to NetworkTables, annotate and
        stream the edge frame; returns only the power-cell bboxes."""
        # Zero out weak edges before drawing overlays.
        edgeFrame = cv2.threshold(edgeFrame, 60, 255, cv2.THRESH_TOZERO)[1]
        valid_labels = ['power_cell']
        nt_tab = self.device_list['OAK-1_Intake']['nt_tab']
        filtered_bboxes = []
        for bbox in bboxes:
            if self.intake_labels[bbox['label']] in valid_labels:
                filtered_bboxes.append(bbox)
        # Largest detection first so index 0 is the primary target.
        filtered_bboxes.sort(key=operator.itemgetter('size'), reverse=True)
        if len(filtered_bboxes) == 0:
            nt_tab.putNumber("tv", 0)
            nt_tab.putNumberArray("ta", [0])
        else:
            nt_tab.putNumber("tv", 1)
            target_angles = []
            for bbox in filtered_bboxes:
                angle_offset = (bbox['x_mid'] - (NN_IMG_SIZE / 2.0)) * 68.7938003540039 / 1920
                cv2.rectangle(edgeFrame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), (255, 255, 255), 2)
                target_angles.append(angle_offset)
                bbox['angle_offset'] = angle_offset
            nt_tab.putNumberArray("ta", target_angles)
        fps = self.device_list['OAK-1_Intake']['fps_handler']
        fps.next_iter()
        cv2.putText(edgeFrame, "{:.2f}".format(fps.fps()), (0, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
        self.oak_1_stream.send_frame(edgeFrame)
        return frame, edgeFrame, filtered_bboxes

    def init_networktables(self):
        """Connect to NetworkTables: try the team server first, then a list
        of fallback addresses. Returns True when connected."""
        NetworkTables.startClientTeam(4201)
        if not NetworkTables.isConnected():
            log.info("Could not connect to team client. Trying other addresses...")
            NetworkTables.startClient([
                '10.42.1.2',
                '127.0.0.1',
                '10.0.0.2',
                '192.168.100.108'
            ])
        if NetworkTables.isConnected():
            log.info("NT Connected to {}".format(NetworkTables.getRemoteAddress()))
            return True
        else:
            log.error("Could not connect to NetworkTables. Restarting server...")
            return False

    def run(self):
        """Start one detection thread per discovered camera, then watch them.

        Exits the watch loop as soon as any worker thread dies. The
        original ``break`` inside ``for t in threadlist`` only left the
        inner loop, so a dead thread was never acted upon and the outer
        ``while True`` spun forever.
        """
        log.info("Setup complete, parsing frames...")
        threadlist = []
        try:
            found_1, device_info_1 = dai.Device.getDeviceByMxId(self.device_list['OAK-D_Goal']['id'])
            self.device_list['OAK-D_Goal']['nt_tab'].putBoolean("OAK-D_Goal Status", found_1)
            if found_1:
                th1 = threading.Thread(target=self.run_goal_detection, args=(device_info_1,))
                th1.start()
                threadlist.append(th1)
            found_2, device_info_2 = dai.Device.getDeviceByMxId(self.device_list['OAK-1_Intake']['id'])
            self.device_list['OAK-1_Intake']['nt_tab'].putBoolean("OAK-1_Intake Status", found_2)
            if found_2:
                th2 = threading.Thread(target=self.run_intake_detection, args=(device_info_2,))
                th2.start()
                threadlist.append(th2)
            # Poll until a worker dies (all() of an empty list is True, so
            # with no cameras this still idles, as before).
            while all(t.is_alive() for t in threadlist):
                sleep(10)
        finally:
            log.info("Exiting Program...")

    def run_goal_detection(self, device_info):
        """Worker loop: consume goal-camera frames and parse each one."""
        self.device_list['OAK-D_Goal']['nt_tab'].putString("OAK-D_Goal Stream", self.device_list['OAK-D_Goal']['stream_address'])
        for frame, edgeFrame, bboxes in goal_edge_depth_detection.capture(device_info):
            self.parse_goal_frame(frame, edgeFrame, bboxes)

    def run_intake_detection(self, device_info):
        """Worker loop: consume intake-camera frames and parse each one."""
        self.device_list['OAK-1_Intake']['nt_tab'].putString("OAK-1 Stream", self.device_list['OAK-1_Intake']['stream_address'])
        for frame, edgeFrame, bboxes in object_edge_detection.capture(device_info):
            self.parse_intake_frame(frame, edgeFrame, bboxes)
class MainDebug(Main):
    """Debug variant of Main: overlays per-bbox diagnostics on the frames
    and shows them in OpenCV windows; pressing 'q' stops the stream."""

    def __init__(self):
        super().__init__()

    def parse_goal_frame(self, frame, edgeFrame, bboxes):
        # Run the normal pipeline first, then draw the debug overlay.
        frame, edgeFrame, bboxes = super().parse_goal_frame(frame, edgeFrame, bboxes)
        valid_labels = ['red_upper_power_port', 'blue_upper_power_port']
        for bbox in bboxes:
            target_label = self.goal_labels[bbox['label']]
            if target_label not in valid_labels:
                continue
            # target_x/angle_offset are only set by the parent when a
            # contour was found; default to 0 otherwise.
            target_x = bbox['target_x'] if 'target_x' in bbox else 0
            angle_offset = bbox['angle_offset'] if 'angle_offset' in bbox else 0
            cv2.putText(edgeFrame, "x: {}".format(round(target_x, 2)), (bbox['x_min'], bbox['y_min'] + 30),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
            cv2.putText(edgeFrame, "y: {}".format(round(bbox['y_mid'], 2)), (bbox['x_min'], bbox['y_min'] + 50),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
            cv2.putText(edgeFrame, "z: {}".format(round(bbox['depth_z'], 2)), (bbox['x_min'], bbox['y_min'] + 70),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
            cv2.putText(edgeFrame, "angle: {}".format(round(angle_offset, 3)), (bbox['x_min'], bbox['y_min'] + 90),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
            cv2.putText(edgeFrame, "conf: {}".format(round(bbox['confidence'], 2)), (bbox['x_min'], bbox['y_min'] + 110),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
            cv2.putText(edgeFrame, "label: {}".format(self.goal_labels[bbox['label']], 1), (bbox['x_min'], bbox['y_min'] + 130),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
        cv2.imshow("OAK-D Goal Edge", edgeFrame)
        cv2.imshow("OAK-D Goal ", frame)
        key = cv2.waitKey(1)
        # NOTE(review): raising StopIteration from the capture loop body is
        # used here as an exit signal; it propagates out of the worker —
        # confirm the intended shutdown path.
        if key == ord("q"):
            raise StopIteration()

    def parse_intake_frame(self, frame, edgeFrame, bboxes):
        # Run the normal pipeline first, then draw the debug overlay.
        frame, edgeFrame, bboxes = super().parse_intake_frame(frame, edgeFrame, bboxes)
        for i, bbox in enumerate(bboxes):
            angle_offset = bbox['angle_offset'] if 'angle_offset' in bbox else 0
            # Primary (largest) detection drawn green, the rest teal.
            frame_color = (0, 255, 0) if i == 0 else (0, 150, 150)
            cv2.rectangle(frame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), frame_color, 2)
            cv2.putText(frame, "x: {}".format(round(bbox['x_mid'], 2)), (bbox['x_min'], bbox['y_min'] + 30),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
            cv2.putText(frame, "y: {}".format(round(bbox['y_mid'], 2)), (bbox['x_min'], bbox['y_min'] + 50),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
            cv2.putText(frame, "angle: {}".format(round(angle_offset, 3)), (bbox['x_min'], bbox['y_min'] + 70),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
            cv2.putText(frame, "size: {}".format(round(bbox['size'], 3)), (bbox['x_min'], bbox['y_min'] + 90),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
            cv2.putText(frame, "conf: {}".format(round(bbox['confidence'], 2)), (bbox['x_min'], bbox['y_min'] + 110),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
        cv2.imshow("OAK-1 Intake Edge", edgeFrame)
        cv2.imshow("OAK-1 Intake", frame)
        key = cv2.waitKey(1)
        if key == ord("q"):
            raise StopIteration()
if __name__ == '__main__':
    log.info("Starting goal-depth-detection-host")
    # Debug mode shows on-screen overlay windows; otherwise headless.
    host = MainDebug() if args.debug else Main()
    host.run()
|
import logging
from nuts.connectionhandling.connection import Connection
from nuts.inventorymanagement.inventory import Inventory
from nuts.testcreation.network_test_bundle import TestBundle
from nuts.inventorymanagement.network_test_definition_loader import TestDefinitionLoader
from nuts.testhandling.network_test_order import TestOrder
from nuts.utilities.file_handler import FileHandler
class TestBuilder:
    """
    This class is responsible for the creation of the test-cases from
    predefined test-definitions and a prepared device-inventory.
    ...

    Attributes
    ----------
    logger
        Instance of the logger class
    network_test_bundle
        reference to the TestBundle-class that is responsible for the
        instantiation of the concrete test-classes from the test-definitions
    connection
        reference to the Connection-class that is responsible for the selection
        of a connection type based on the device type and the possible
        connections for a single device
    inventory
        reference to the inventory-class that is responsible for the creation
        of an inventory with specifications of the different devices in a
        network and the connections between them
    network_test_definition_loader
        reference to the TestDefinitionLoader-class that is responsible for
        loading the network test definitions from a testdefinitions-file
    network_test_definitions
        collection of testdefinitions that gets first passed to the connection
        to determine, which connection the tests should use and is then
        passed to the network_test_bundle to instantiate the concrete tests
    network_tests
        collection of concrete tests that are returned to the
        network_test_controller to be executed against a network

    Methods
    ------
    get_test_definitions()
        loads test definitions from the test_definition_loader
    connect_device_objects()
        connects devices from the inventory with the test definitions
    get_runnable_tests()
        calls the network_test_bundle class to instantiate concrete tests
        according to the test definitions
    """

    def __init__(self, args):
        """Build the runnable test collection.

        Order matters here: definitions are loaded, bound to inventory
        devices, given their connection type, optionally ordered via the
        UI, and only then instantiated.

        args: parsed CLI namespace; ``args.r`` forces skipping the
        ordering UI regardless of the "skip-UI" config entry.
        """
        self.logger = logging.getLogger(__name__)
        self.inventory = Inventory()
        self.connection = Connection()
        self.network_test_definition_loader = TestDefinitionLoader()
        self.network_test_bundle = TestBundle()
        self.network_test_order = TestOrder()
        self.network_test_definitions = {}
        self.network_tests = []
        self.get_test_definitions()
        self.connect_device_objects()
        self.connection.define_connection(self.network_test_definitions)
        self.file_handler = FileHandler()
        # -r on the command line overrides the config file's "skip-UI".
        if args.r:
            self.skip_ui = True
        else:
            self.skip_ui = self.file_handler.read_config("skip-UI")
        if not self.skip_ui:
            self.network_test_order.define_test_order(self.network_test_definitions)
        self.get_runnable_tests()

    def get_test_definitions(self):
        """
        Loads test definitions from the network_test_definition_loader into
        the network_test_definitions collection
        """
        loader = self.network_test_definition_loader
        definitions = loader.create_test_definition_object()
        self.network_test_definitions = definitions
        self.logger.info("Unordered Test-Definitions loaded")

    def connect_device_objects(self):
        """
        Maps devices defined in the network_test_definitions collection to
        specified devices from the inventory
        """
        for test in self.network_test_definitions:
            test_device = self.network_test_definitions[test].get_test_devices()
            device = self.inventory.devices[test_device]
            self.network_test_definitions[test].set_test_device(device)
        self.logger.info("Device Objects connected to Test-Definitions")

    def get_runnable_tests(self):
        """
        Gets concrete tests for the tests specified in the
        network_test_definitions collection from the network_test_bundle class
        """
        # Without the UI the definitions keep their loader order; otherwise
        # the user-defined order from TestOrder is used.
        if self.skip_ui:
            test_definitions = []
            for definition in self.network_test_definitions.values():
                test_definitions.append(definition)
        else:
            test_definitions = self.network_test_order.get_ordered_test_definitions()
        test_bundle = self.network_test_bundle.create_test_bundle(test_definitions)
        self.network_tests = test_bundle
        self.logger.info("Runnable Tests created")

    def get_network_tests(self):
        # Accessor used by the test controller to retrieve the built tests.
        return self.network_tests
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-23 07:11
from __future__ import unicode_literals
import aether.python.validators
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    # Applies after the entity-schema migration of the kernel app.
    dependencies = [
        ('kernel', '0020_entity_schema'),
    ]

    operations = [
        # Allow Entity.mapping_revision to be blank/NULL.
        migrations.AlterField(
            model_name='entity',
            name='mapping_revision',
            field=models.TextField(blank=True, null=True, verbose_name='mapping revision'),
        ),
        # include field validators on the JSON definition/schema fields
        migrations.AlterField(
            model_name='mapping',
            name='definition',
            field=django.contrib.postgres.fields.jsonb.JSONField(validators=[aether.python.validators.validate_mapping_definition], verbose_name='mapping rules'),
        ),
        migrations.AlterField(
            model_name='mappingset',
            name='schema',
            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, validators=[aether.python.validators.validate_avro_schema], verbose_name='AVRO schema'),
        ),
        migrations.AlterField(
            model_name='schema',
            name='definition',
            field=django.contrib.postgres.fields.jsonb.JSONField(validators=[aether.python.validators.validate_schema_definition], verbose_name='AVRO schema'),
        ),
    ]
|
class cat:
    """A pet cat described by its name, coat color, and age.

    Attributes:
        name: the cat's given name.
        color: coat color.
        age: the cat's age (units defined by the caller).
    """

    def __init__(self, name, color, age):
        self.name = name
        self.color = color
        self.age = age

    def __repr__(self):
        # Added for debuggability; mirrors the constructor signature.
        return f"{type(self).__name__}(name={self.name!r}, color={self.color!r}, age={self.age!r})"

    @staticmethod
    def purr():
        """Print the sound a contented cat makes."""
        print("purrrrrr")
class dog:
    """A pet dog described by its name, coat color, and age.

    Attributes:
        name: the dog's given name.
        color: coat color.
        age: the dog's age (units defined by the caller).
    """

    def __init__(self, name, color, age):
        self.name = name
        self.color = color
        self.age = age

    def __repr__(self):
        # Added for debuggability; mirrors the constructor signature.
        return f"{type(self).__name__}(name={self.name!r}, color={self.color!r}, age={self.age!r})"

    @staticmethod
    def bark():
        """Print the sound a dog makes."""
        print("woof woof")

    def wagtail(self):
        """Print that this dog's tail is wagging."""
        print(self.name + "'s tail is wagging!")

    def fetch(self, thrownObject='ball'):
        """Print that this dog fetched *thrownObject* (default 'ball')."""
        print(self.name, "fetched the ", thrownObject)
import uuid
import os
from django.test import TestCase
from corehq.apps.receiverwrapper.util import submit_form_locally
from corehq.form_processor.tests.utils import use_sql_backend
from corehq.util.test_utils import TestFileMixin
class XMLElementTest(TestCase, TestFileMixin):
    # TestFileMixin configuration: fixtures live in ./data next to this file.
    file_path = ('data',)
    root = os.path.dirname(__file__)

    def test_various_encodings(self):
        """Submitting XML that declares different encodings should round-trip
        the sample value through both form_data and the parsed XML element."""
        tests = (
            ('utf-8', 'हिन्दी चट्टानों'),
            ('UTF-8', 'हिन्दी चट्टानों'),
            ('ASCII', 'hello'),
        )
        xml_template = self.get_xml('encoding').decode('utf-8')
        for encoding, value in tests:
            # Fresh form_id per case so each submission creates a new form.
            xml_data = xml_template.format(
                encoding=encoding,
                form_id=uuid.uuid4().hex,
                sample_value=value,
            )
            xform = submit_form_locally(xml_data.encode('utf-8'), 'test-domain').xform
            self.assertEqual(value, xform.form_data['test'])
            elem = xform.get_xml_element()
            self.assertEqual(value, elem.find('{http://commcarehq.org/couchforms-tests}test').text)
@use_sql_backend
class XMLElementTestSQL(XMLElementTest):
    # Re-runs the inherited assertions against the SQL form-processor backend.
    pass
|
# -*- coding: utf-8 -*-
from dgp.core.controllers.controller_interfaces import VirtualBaseController
from .project import ProjectTab, AirborneProjectController
from .flight import FlightTab, FlightController
from .dataset import DataSetTab, DataSetController
__all__ = ['ProjectTab', 'FlightTab', 'DataSetTab', 'tab_factory']

# Maps a controller's concrete class to the workspace tab that renders it.
# Note: AirborneProjectController/ProjectTab disabled until implemented.
_tabmap = {
    # AirborneProjectController: ProjectTab,
    FlightController: FlightTab,
    DataSetController: DataSetTab
}
def tab_factory(controller: VirtualBaseController):
    """Return the workspace tab constructor registered for the given
    controller's concrete type, or None when no tab is registered."""
    tab_cls = _tabmap.get(controller.__class__)
    return tab_cls
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: reduce_all"""
import akg.topi
import akg.tvm
import akg
from akg.utils import validation_check as vc_util
from akg.utils import format_transform as ft_util
from akg.utils import dsl_create as dc
@vc_util.check_input_type(akg.tvm.tensor.Tensor, (int, list, tuple, type(None)), (bool, type(None)))
def reduce_all(data, axis=None, keepdims=False):
    """
    Computes logical and of the input tensor.

    Implemented without a native boolean reduction: the input is cast to
    float16, complemented (1 - x), summed over the reduce axes, and the
    result is True only where that sum is zero (i.e. no False entries).

    Args:
        data(tvm.tensor.Tensor): Tensor of type Boolean.
        axis(Union[None, int, list, tuple]): Specifies which axes to reduce, if None, all dimensions of
            input tensor data will be reduced and the shape of output tensor will be (1,).
        keepdims(Union[None, bool]): if true, keep the dimensions with length 1.

    Returns:
        tvm.tensor.Tensor of same type as input tensor data.
    """
    shape = [x.value for x in data.shape]
    vc_util.ops_dtype_check(data.dtype, vc_util.DtypeForDavinci.BOOL)
    vc_util.check_shape(shape)
    # NOTE(review): full reduction (axis=None) is only accepted together
    # with keepdims=True here — presumably a backend limitation; confirm.
    if axis is None and keepdims is False:
        raise ValueError("keepdims must be True when axis is None!")
    axis_new = ft_util.refine_reduce_axis(data, axis)
    # Cast bool -> float16 so the sum reduction below is supported.
    xx1 = akg.tvm.compute(shape, lambda *indice: data(*indice).astype("float16"), name='xx1')
    # Complement: 1.0 where the input is False, 0.0 where True.
    xx = (-xx1 + dc.one_const("float16"))
    # Sum of complements = count of False elements along the reduce axes.
    yy = akg.topi.sum(xx, axis=axis_new, keepdims=keepdims)
    o_shape = list(yy.shape)
    # zz is True wherever at least one reduced element was False.
    zz = akg.tvm.compute(o_shape, lambda *indice: yy(*indice).astype("bool"), name='zz')
    # Select 0 when any-False, 1 when all-True, then cast back to bool.
    y1 = akg.tvm.compute(o_shape, lambda *indice: akg.tvm.expr.Select(zz(*indice), dc.zero_const("float16"), dc.one_const("float16")), name="y1")
    y = akg.tvm.compute(o_shape, lambda *indice: y1(*indice).astype("bool"), name='y')
    return y
|
import os
import csv
from dotenv import load_dotenv
import psycopg2
import json
from psycopg2.extras import execute_values
# Pull DB credentials from the local .env file into the process environment.
load_dotenv()  # loads contents of the .env file into the script's environment
DB_NAME = os.getenv('DB_NAME')
DB_USER = os.getenv('DB_USER')
DB_PASSWORD = os.getenv('DB_PASSWORD')
DB_HOST = os.getenv('DB_HOST')
# NOTE(review): this prints the database password to stdout -- a secrets leak
# if output is logged or shared; consider removing.
print(DB_NAME, DB_USER, DB_PASSWORD, DB_HOST)
# Connect to SQL-hosted PostgreSQL
connection = psycopg2.connect(
    dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST)
print('CONNECTION', connection)
# A "cursor", a structure to iterate over db records to perform queries
cursor = connection.cursor()
print('CURSOR', cursor)
# Rebuild the table from scratch: DROP first so re-running the script
# never fails with "table already exists".
sql_create = '''DROP TABLE IF EXISTS
person_table;
CREATE TABLE person_table (
id SERIAL PRIMARY KEY,
Survived int,
Pclass int,
Name varchar(100),
Sex varchar(6),
age float,
SiblingsorSpouses int,
ParentsorChildren int,
Fare float
);
'''
cursor.execute(sql_create)
connection.commit()
# Read the whole titanic csv into memory; the context manager guarantees the
# file handle is closed (the original bare open() was never closed).
with open('titanic.csv', 'r') as csv_file:
    reader = list(csv.reader(csv_file))
# Record count
record_count = 0
# Insert information into the database.
# All data rows go in one batched statement: psycopg2's execute_values
# (imported at the top of the file but previously unused) makes a single
# round trip instead of one cursor.execute() per record.
sqlInsert = '''INSERT INTO person_table (Survived, Pclass, Name, Sex, age, SiblingsorSpouses, ParentsorChildren, Fare)
VALUES %s '''
# reader[0] is the csv header; each data row contributes its first 8 columns
rows = [tuple(row[:8]) for row in reader[1:]]
execute_values(cursor, sqlInsert, rows)
# Record count (number of rows inserted)
record_count = len(rows)
# Commit once after the batch insert.
connection.commit()
# NOTE: every aggregate query below now carries an explicit ORDER BY.
# The prints index the fetchall() results positionally ([0], [1], ...),
# and GROUP BY alone guarantees no row order in PostgreSQL -- without an
# ORDER BY the labels could be attached to the wrong numbers.
# Survived is ordered ascending (index 0 = died, index 1 = survived);
# pclass is ordered descending (index 0 = class 3, index 2 = class 1),
# matching the positions each print relies on.

# How many passengers survived, and how many died?
num_passengers_s_d = '''
SELECT Survived, COUNT(*)
FROM person_table
GROUP by Survived
ORDER BY Survived;
'''
cursor.execute(num_passengers_s_d)
num_passengers_s_d = cursor.fetchall()
print('The number of passengers that died and survived was',
      num_passengers_s_d[0][1], 'and', num_passengers_s_d[1][1], 'respectively')
# How many passengers were in each class?
num_passengers_c = '''
SELECT pclass, COUNT(*)
FROM person_table
GROUP by pclass
ORDER BY pclass DESC;
'''
cursor.execute(num_passengers_c)
num_passengers_c = cursor.fetchall()
print('The number of passengers in class 1 was',
      num_passengers_c[2][1], ', class 2:', num_passengers_c[1][1], 'and class 3:', num_passengers_c[0][1])
# How many passengers survived/died within each class?
num_passengers_s_d_1 = '''
SELECT Survived, COUNT(*)
FROM person_table
WHERE pclass = 1
GROUP by Survived
ORDER BY Survived;
'''
cursor.execute(num_passengers_s_d_1)
num_passengers_s_d_1 = cursor.fetchall()
print('The number of passengers that died and survived in class 1 was',
      num_passengers_s_d_1[0][1], 'and', num_passengers_s_d_1[1][1], 'respectively')
num_passengers_s_d_2 = '''
SELECT Survived, COUNT(*)
FROM person_table
WHERE pclass = 2
GROUP by Survived
ORDER BY Survived;
'''
cursor.execute(num_passengers_s_d_2)
num_passengers_s_d_2 = cursor.fetchall()
print('The number of passengers that died and survived in class 2 was',
      num_passengers_s_d_2[0][1], 'and', num_passengers_s_d_2[1][1], 'respectively')
num_passengers_s_d_3 = '''
SELECT Survived, COUNT(*)
FROM person_table
WHERE pclass = 3
GROUP by Survived
ORDER BY Survived;
'''
cursor.execute(num_passengers_s_d_3)
num_passengers_s_d_3 = cursor.fetchall()
print('The number of passengers that died and survived in class 3 was',
      num_passengers_s_d_3[0][1], 'and', num_passengers_s_d_3[1][1], 'respectively')
# What was the average age of survivors vs nonsurvivors?
avg_age_s_d = '''
SELECT Survived, AVG(age)
FROM person_table
GROUP by Survived
ORDER BY Survived;
'''
cursor.execute(avg_age_s_d)
avg_age_s_d = cursor.fetchall()
print('The average age of survivors vs non-survivors was',
      '{0:.2f}'.format(avg_age_s_d[1][1]), 'and', '{0:.2f}'.format(avg_age_s_d[0][1]), 'respectively')
# What was the average age of each passenger class?
avg_age_p_c = '''
SELECT pclass, AVG(age)
FROM person_table
GROUP by pclass
ORDER BY pclass DESC;
'''
cursor.execute(avg_age_p_c)
avg_age_p_c = cursor.fetchall()
print('The average age, in years, of passenger class 1 was',
      '{0:.2f}'.format(avg_age_p_c[2][1]), ', class 2:', '{0:.2f}'.format(avg_age_p_c[1][1]), 'and class 3:', '{0:.2f}'.format(avg_age_p_c[0][1]))
# What was the average fare by passenger class?
avg_fare_p_c = '''
SELECT pclass, AVG(fare)
FROM person_table
GROUP by pclass
ORDER BY pclass DESC;
'''
cursor.execute(avg_fare_p_c)
avg_fare_p_c = cursor.fetchall()
print('The average fare of passenger class 1 was',
      '{0:.2f}'.format(avg_fare_p_c[2][1]), ', class 2:', '{0:.2f}'.format(avg_fare_p_c[1][1]), 'and class 3:', '{0:.2f}'.format(avg_fare_p_c[0][1]))
# What was the average fare by survival?
avg_fare_s_d = '''
SELECT Survived, AVG(fare)
FROM person_table
GROUP by Survived
ORDER BY Survived;
'''
cursor.execute(avg_fare_s_d)
avg_fare_s_d = cursor.fetchall()
print('The average fare by survival was:',
      '{0:.2f}'.format(avg_fare_s_d[1][1]), 'for survivors and', '{0:.2f}'.format(avg_fare_s_d[0][1]), 'for non-survivors')
# How many siblings/spouses aboard on average, by passenger class?
avg_sib_spouses_p_c = '''
SELECT pclass, AVG(SiblingsorSpouses)
FROM person_table
GROUP by pclass
ORDER BY pclass DESC;
'''
cursor.execute(avg_sib_spouses_p_c)
avg_sib_spouses_p_c = cursor.fetchall()
print('The average number of siblings and/or spouses for class 1 was',
      '{0:.2f}'.format(avg_sib_spouses_p_c[2][1]), ', class 2:', '{0:.2f}'.format(avg_sib_spouses_p_c[1][1]), 'and class 3:', '{0:.2f}'.format(avg_sib_spouses_p_c[0][1]))
# How many siblings/spouses aboard on average, by survival?
avg_sib_spouses_s_d = '''
SELECT Survived, AVG(SiblingsorSpouses)
FROM person_table
GROUP by Survived
ORDER BY Survived;
'''
cursor.execute(avg_sib_spouses_s_d)
avg_sib_spouses_s_d = cursor.fetchall()
print('The average number of siblings and/or spouses by survival was:',
      '{0:.2f}'.format(avg_sib_spouses_s_d[1][1]), 'for survivors and', '{0:.2f}'.format(avg_sib_spouses_s_d[0][1]), 'for non-survivors')
# How many parents/children aboard on average, by passenger class?
avg_parents_children_p_c = '''
SELECT pclass, AVG(parentsorchildren)
FROM person_table
GROUP by pclass
ORDER BY pclass DESC;
'''
cursor.execute(avg_parents_children_p_c)
avg_parents_children_p_c = cursor.fetchall()
print('The average number of parents and/or children for class 1 was',
      '{0:.2f}'.format(avg_parents_children_p_c[2][1]), ', class 2:', '{0:.2f}'.format(avg_parents_children_p_c[1][1]), 'and class 3:', '{0:.2f}'.format(avg_parents_children_p_c[0][1]))
# How many parents/children aboard on average, by survival?
avg_parents_children_s_d = '''
SELECT Survived, AVG(parentsorchildren)
FROM person_table
GROUP by survived
ORDER BY survived;
'''
cursor.execute(avg_parents_children_s_d)
avg_parents_children_s_d = cursor.fetchall()
# fixed label: this block reports parents/children, not siblings/spouses
print('The average number of parents and/or children by survival was:',
      '{0:.2f}'.format(avg_parents_children_s_d[1][1]), 'for survivors and', '{0:.2f}'.format(avg_parents_children_s_d[0][1]), 'for non-survivors')
# Do any passengers have the same name?
duplicate_names = '''
SELECT name, COUNT(name)
FROM person_table
GROUP BY name
HAVING COUNT(name) > 1;
'''
cursor.execute(duplicate_names)
duplicate_names = cursor.fetchall()
print('The number of passengers with duplicate names is:',
      duplicate_names)
cursor.close()
connection.close()
|
# Canonical field names used to address the source sequence, the target
# sequence, and the originating filename of a dataset example.
# NOTE(review): presumably consumed by a seq2seq data pipeline -- confirm
# against the callers that import these names.
src_field_name = 'src'
tgt_field_name = 'tgt'
fname_field_name = 'fname'
|
import cmath
from unittest import TestCase
from ua_model.MapFromWtoT import MapFromWtoT
class TestMapFromWtoT(TestCase):

    def test_validation(self):
        """Constructor accepts valid parameters and rejects invalid ones."""
        # valid: 0 <= t_0 < t_in
        self.assertIsInstance(MapFromWtoT(t_0=1.0, t_in=2.0), MapFromWtoT)
        # t_0 must not be negative
        self.assertRaises(ValueError, MapFromWtoT, t_0=-0.1, t_in=1.0)
        # t_in must be greater than t_0
        self.assertRaises(ValueError, MapFromWtoT, t_0=0.1, t_in=0.0)

    def test___call__(self):
        """The map sends sample W points to the expected t values."""
        mapping = MapFromWtoT(t_0=0.0, t_in=0.25)
        cases = [
            {'W': 2.0, 'expected_t': -4.0 / 9.0},
            {'W': 0.5j, 'expected_t': 4.0 / 25.0},
            {'W': -2j, 'expected_t': 4.0 / 25.0},
            {'W': -0.1 + 0.2j, 'expected_t': 0.031069896194 + 0.035343944637j},
        ]
        for case in cases:
            with self.subTest(case=case):
                actual = mapping(case['W'])
                self.assertTrue(cmath.isclose(actual, case['expected_t']))
|
from bs4 import BeautifulSoup
import requests
import os.path
import re
import pandas as pd
import numpy as np
import csv
import json
from datetime import datetime
from tqdm import tqdm
from collections import defaultdict
from collections import Counter
from IPython.core.display import HTML
import time
import nltk
import heapq
# custom libraries
from data_collection import *
from tsv_management import *
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.stem import PorterStemmer
########################################################################################################################################################
# search_engine.py #
# #
# library of functions useful to process every document for the search, initialize the search engine, and perform the actual search #
# #
########################################################################################################################################################
##########################################################################
# #
# functions to preprocess the text to have it ready for a search #
# #
##########################################################################
def preprocessing(text):
    '''
    this function preprocesses a string to prepare it either for the inverted
    index creation or for the search of the query

    in details, here we:
    - tokenize the string (with a regex tokenizer)
    - convert the words to lowercase
    - remove the english stopwords
    - stem the words using Porter stemmer

    input: string to preprocess
    output: preprocessed list of words
    '''
    # initialize tokenizer
    tokenizer = RegexpTokenizer(r'\w+')
    # initialize the stemmer
    porter = PorterStemmer()
    # hoist the stopword list out of the loop and use a set for O(1)
    # membership tests: stopwords.words('english') rebuilds the whole
    # list on every call, so the original paid that cost once per token
    stopword_set = set(stopwords.words('english'))
    # tokenize the text
    word_list = tokenizer.tokenize(text)
    processed_text = []
    for word in word_list:
        if word.lower() not in stopword_set:
            processed_text.append(porter.stem(word))
    return(processed_text)
def list_preprocessing( list_of_strings ):
    '''
    preprocesses, in place, every string in a list of strings

    input: list of strings
    output: None
    non-explicit output: every entry of the list is replaced by its
                         preprocessed list of words (None becomes [])
    '''
    for position in tqdm(range(len(list_of_strings)), desc="Preprocessing documents"):
        entry = list_of_strings[position]
        list_of_strings[position] = preprocessing(entry) if entry is not None else []
    return
##########################################################################
# #
# functions to create and manage a vocabulary that maps each word #
# of our anime descriptions to an integer number #
# #
##########################################################################
def vocabulary_generation():
    '''
    NOTE: there are two different tqdm progress bars called in this function

    this function generates a vocabulary using all the anime descriptions
    and saves everything in a json file
    '''
    # all the anime descriptions, preprocessed in place
    descriptions = column_retrieval('animeDescription')
    list_preprocessing(descriptions)
    # map every distinct word to an integer id and persist the result
    vocabulary = vocabulary_creation(descriptions)
    with open("content/vocabulary.json", 'w') as f:
        json.dump(vocabulary, f)
    return
def vocabulary_creation(list_of_lists):
    '''
    here we create a vocabulary of all words from a list of lists of words

    input: a list that contains lists of words
    output: dictionary that associates words to integers starting from 0
    '''
    # collecting the set of all distinct words
    set_of_words = set()
    for words_list in list_of_lists:
        set_of_words.update(words_list)
    # enumerate the set directly instead of the original pop()-in-a-loop,
    # which destroyed the set while assigning ids; ids are still assigned
    # in arbitrary (set-iteration) order, exactly as before
    return {word: i for i, word in enumerate(set_of_words)}
def vocabulary_retrieval():
    '''
    reads the word vocabulary back from the disk

    input: None
    output: vocabulary dictionary
    '''
    return json_loading('./content/vocabulary.json')
def json_loading(path):
    '''
    parses the json file found at path and returns its contents

    input: json file path
    output: data retrieved from the json file
    '''
    with open(path) as source:
        return json.load(source)
def vocabulary_inversion(vocabulary):
    '''
    swaps keys and values of the input vocabulary

    input: dictionary {key1:value1, ...}
    output: dictionary {value1:key1, ...}
    '''
    return {word_id: word for word, word_id in vocabulary.items()}
def vocabulary_conversion(words_list, vocabulary):
    '''
    this function converts a list of words according to a certain vocabulary

    the search is conjunctive: if any word is missing from the vocabulary no
    document can match, so an empty list is returned (same as the original)

    input: (list of words to convert, vocabulary)
    output: list of word ids according to the vocabulary, [] if any word is unknown
    '''
    ids = []
    for word in words_list:
        # membership test on the dict itself is O(1); the original tested
        # `word in vocabulary.keys()`, which is a redundant view lookup
        if word not in vocabulary:
            return []
        ids.append(vocabulary[word])
    return ids
#################################################################################################################################################################################
# #############################################################################
# the following set of functions are used in the simple unranked search engine #############################################################################
# which only performs a conjunctive search on the words of the query #############################################################################
# #############################################################################
#################################################################################################################################################################################
##########################################################################
# #
# functions to create and manage the inverted index #
# #
##########################################################################
def unranked_inverted_index_creation(list_of_documents, vocabulary):
    '''
    this function builds an inverted index using a list of documents and a vocabulary

    NOTE: for simplicity of search, every word in our inverted index will
          belong to a dummy 0 document that contains every word
          (our anime documents are indexed starting from 1)
    NOTE: because we only consider document_ids in increasing order,
          our inverted index is automatically sorted

    input: (list of the (preprocessed) documents, vocabulary of terms)
    output: list containing the inverted index
            NOTE: inverted_index[i] will be the inverted list
                  associated with the word vocabulary[i]
    '''
    number_of_words = len(vocabulary)
    # every inverted list starts with the dummy document 0
    inverted_index = [[0] for _ in range(number_of_words)]
    for i in tqdm(range(len(list_of_documents)), desc="Building the inverted index"):
        # our documents start from 1
        document_id = i + 1
        for word in list_of_documents[i]:
            # converting the word to its id according to the vocabulary
            posting_list = inverted_index[vocabulary[word]]
            # document ids are processed in increasing order, so a duplicate can
            # only be the entry appended last; checking the tail replaces the
            # original O(len) `document_id not in posting_list` scan per token
            if posting_list[-1] != document_id:
                posting_list.append(document_id)
    return (inverted_index)
def unranked_inverted_index_generation():
    '''
    NOTE: there are three different tqdm progress bars called in this function

    this function generates an inverted index using all the anime descriptions
    and saves everything in a json file
    '''
    # all the anime descriptions, preprocessed in place
    descriptions = column_retrieval('animeDescription')
    list_preprocessing(descriptions)
    # build the inverted index against the stored vocabulary
    vocabulary = vocabulary_retrieval()
    inverted_index = unranked_inverted_index_creation(descriptions, vocabulary)
    # persist the inverted index
    with open("content/unranked_inverted_index.json", 'w') as f:
        json.dump(inverted_index, f)
    return
def unranked_inverted_index_retrieval():
    '''
    reads the unranked inverted index back from the disk

    input: None
    output: inverted index list
    '''
    return json_loading('./content/unranked_inverted_index.json')
###############################################################################################
# #
# functions used to intersect two or more elements of the inverted index #
# #
###############################################################################################
def intersection_pointers(inverted_words):
    '''
    computes the intersection on the elements of the inverted index

    NOTE: this function assumes that there exists a 'universal' document indexed by 0
          so that the intersection will never be empty
          and we won't have to do several checks on the list lengths

    input: list of ordered lists of integers
    output: a list containing the intersection among the elements of the input lists

    NOTE: this algorithm compares the last element of every list instead of the first
          so that the last element (which is the first of every list) will always be a match
    '''
    number_of_words = len(inverted_words)
    # an array of indices that points to the last element of every list in inverted_words
    pointers = list( map(lambda x: len(x) - 1, inverted_words) )
    # creating output set
    intersection = []
    # j will be the index used to navigate the elements of inverted_words
    while( pointers[0] >= 0):  # the algorithm stops when the first list has been scanned completely
        current_element = inverted_words[0][pointers[0]]  # we always start comparing the pointed element of the first list
        j = 1                                             # with the pointed element of the second list
        while( j < number_of_words):  # this cycle only ends when a common element is found
                                      # thus the need for the common 0 element
            # if the pointed element of this list is smaller than the current element,
            # decrement all the previous lists' pointers to match this one
            # and restart the cycle from the second list
            if current_element > inverted_words[j][ pointers[j] ]:
                current_element = decrement_some_pointers(inverted_words, j, pointers)
                j = 1
            # if the pointed element of this list is bigger than the current element,
            # decrement the pointer of the current list; if after decrementing the
            # pointed element coincides with the current element go on with the cycle,
            # otherwise repeat this cycle and fall back in the previous if case
            elif current_element < inverted_words[j][ pointers[j] ]:
                j += decrement_one_pointers(inverted_words[j], current_element, pointers, j)
            # if the pointed element of this list is equal to the current element,
            # go on with the cycle
            else:
                j += 1
        # I arrive here only if current_element is in every list
        intersection.append(current_element)  # so I add it to the solution
        decrement_all_pointers(pointers)      # and I remove it from the lists
    return (intersection)
def decrement_one_pointers(inverted_word, document, pointers, i):
    '''
    input: (ordered list of integer, integer, list of integer, integer)
    output: 1 if the integer document is in the list, 0 otherwise
    non-explicit output: pointers[i] is decremented until
                         inverted_word[ pointers[i] ] <= document
    '''
    # skip every element strictly greater than document
    while inverted_word[pointers[i]] > document:
        pointers[i] -= 1
    # report whether we landed exactly on document
    return 1 if inverted_word[pointers[i]] == document else 0
def decrement_some_pointers(inverted_words, word, pointers):
    '''
    input: (list of lists of ordered integers, integer, list of integers)
    output: the greatest element of inverted_words[0] that
            is smaller or equal than inverted_words[word][ pointers[word] ]
    non-explicit output: the pointers of lists 0 to word-1 are decremented until
                         every pointed element is smaller or equal than
                         inverted_words[word][ pointers[word] ]
    '''
    # element currently pointed in the word-th list
    document = inverted_words[word][pointers[word]]
    # realign the pointer of every earlier list with `document`
    for i in range(word):
        decrement_one_pointers(inverted_words[i], document, pointers, i)
    return inverted_words[0][pointers[0]]
def decrement_all_pointers(pointers):
    '''
    input: list of integers
    output: None
    non-explicit output: decrements every element in the list by 1, in place
    '''
    pointers[:] = [pointer - 1 for pointer in pointers]
###############################################################################################
# #
# functions used to initialize the unranked search engine and perform the actual search #
# #
###############################################################################################
def unranked_search_engine_initialization():
    '''
    initialize the unranked search engine by loading the vocabulary
    and the inverted index from the disk

    input: None
    output: (vocabulary, inverted index)
    '''
    return (vocabulary_retrieval(), unranked_inverted_index_retrieval())
def unranked_search(vocabulary, inverted_index):
    '''
    this is the actual search engine:
    given a query, it will print some brief information about the matching anime

    input: vocabulary of words, inverted index
    output: None
    non-explicit input: the function will ask for a search query
    non-explicit output: prints some brief information about the anime that match the query
    '''
    # ask the user for a query and prepare it exactly like the documents
    raw_query = input('Input a query:')
    print('')
    processed_query = preprocessing(raw_query)
    # map every query word to its id ([] if any word is unknown)
    query_ids = vocabulary_conversion(processed_query, vocabulary)
    # collect the inverted list of every query word
    inverted_words = [inverted_index[word_id] for word_id in query_ids]
    if inverted_words:
        # conjunctive match: keep only documents present in every list
        search_results = intersection_pointers(inverted_words)
        print(f'{len(search_results)-1} search results found!\n')
    else:
        # no usable word: only the dummy document remains
        search_results = [0]
    # printing results
    unranked_search_result_printing(search_results, 'animeTitle', 'animeDescription', 'Url')
    return
def unranked_search_result_printing(anime_idx, *columns):
    '''
    this function displays the columns information about the anime in anime_idx
    it prints a 'Nothing Found' message if the only element in anime_idx is 0

    input: list of integer indices, column informations to display
    output: None
    non-explicit output: prints the search results
    '''
    # removing the dummy 0 anime document
    # NOTE(review): list.remove raises ValueError if 0 is absent -- callers are
    # expected to always include the dummy document; confirm for new call sites
    anime_idx.remove(0)
    # taking care of an empty search result
    if len(anime_idx) == 0:
        print("Couldn't find the query in the document. Try changing the terms or using less words.")
        return
    # retrieving information about the matching anime
    # (anime_information_retrieval comes from the starred imports at the top)
    information_df = pd.DataFrame(columns = columns)
    for i in anime_idx:
        information_df.loc[i] = anime_information_retrieval(i, columns)
    # print the informations on screen
    # NOTE(review): `display` is not imported explicitly in this file; it is a
    # builtin only under IPython/Jupyter or must come from a starred import -- verify
    display(HTML(information_df.to_html(index = False)))
    return
#################################################################################################################################################################################
# #############################################################################
# the following set of functions are used in the ranked search engine which performs #############################################################################
# a conjunctive search and ranks the results based on the similarity with the query #############################################################################
# #############################################################################
#################################################################################################################################################################################
################################################################################################
# #
# functions to create and manage the inverted index and the tfidf vectors #
# #
################################################################################################
def ranked_inverted_index_creation(corpus, vocabulary, unranked_inverted_index):
    '''
    this function creates the inverted index with the associated tfidf
    and the vector of tfidf for all the documents

    NOTE: for simplicity of search, every word in our inverted index will
          belong to a dummy 0 document with a zero tfidf
    NOTE: because we only consider document_ids in an increasing fashion,
          our inverted index is automatically sorted

    input: list of anime description, vocabulary, unranked_inverted_index
    output: (inverted index dictionary of the form {word : [(doc, tfidf), (doc, tfidf), ...], ...},
             tfidf vector dictionary of the form {doc_id: {word_id : tfidf, word_id : tfidf, ...}, ...})
    '''
    # total number of documents
    N = len(corpus)
    # inverted index dictionary; the dummy tuple (0, 0) is inserted in
    # every entry for convenience in searching
    inverted_tf_idf = defaultdict(lambda: [(0, 0)])
    # tfidf vectors dictionary of the form {doc_id: {word_id: tfidf}}
    tf_idf = defaultdict(dict)
    for i in tqdm(range(N), desc = 'Ranked inverted index generation'):
        document = corpus[i]
        doc_id = i + 1
        # total number of words in the document
        words_count = len(document)
        # occurrences of every distinct word in the document; iterating the
        # Counter directly gives the distinct words, so the per-document
        # sort that np.unique performed in the original is avoided
        counter = Counter(document)
        for word, occurrences in counter.items():
            word_id = vocabulary[word]
            # tf = # occurrences of word / total number of words in the document
            tf = occurrences / words_count
            # df = number of documents containing the word, i.e. the length of
            # the corresponding unranked inverted list minus the dummy 0 document
            df = len(unranked_inverted_index[word_id]) - 1
            # idf = log10(# number of documents / df)
            idf = np.log10(N / df)
            tf_idf[doc_id][word_id] = tf * idf
            inverted_tf_idf[word_id].append((doc_id, tf * idf))
    return (inverted_tf_idf, tf_idf)
def ranked_inverted_index_generation():
    '''
    NOTE: there are three different tqdm progress bars called in this function

    this function generates the tfidf vectors of all the documents and the
    ranked inverted index using all the anime descriptions, and saves
    everything in two json files
    '''
    # all the anime descriptions, preprocessed in place
    descriptions = column_retrieval('animeDescription')
    list_preprocessing(descriptions)
    vocabulary = vocabulary_retrieval()
    # build both the ranked inverted index and the per-document tfidf vectors
    inverted_index, tfidf = ranked_inverted_index_creation(descriptions, vocabulary, unranked_inverted_index_retrieval())
    # persist the tfidf vectors
    with open("content/tfidf_vectors.json", 'w') as f:
        json.dump(tfidf, f)
    # persist the inverted index
    with open("content/ranked_inverted_index.json", 'w') as f:
        json.dump(inverted_index, f)
    return
def ranked_inverted_index_retrieval():
    '''
    reads the ranked inverted index back from the disk

    input: None
    output: inverted index dictionary
    '''
    raw = json_loading('./content/ranked_inverted_index.json')
    # JSON object keys are always strings, so the integer word ids
    # must be restored after loading
    return {int(word_id): postings for word_id, postings in raw.items()}
def tfidf_vector_retrieval():
    '''
    reads the tfidf vectors back from the disk

    input: None
    output: tfidf vectors dictionary
    '''
    raw = json_loading('./content/tfidf_vectors.json')
    # JSON object keys are always strings, so the integer ids must be
    # restored on both the outer (document) and inner (word) dictionaries
    return {
        int(doc_id): {int(word_id): value for word_id, value in vector.items()}
        for doc_id, vector in raw.items()
    }
def vectorize_query(query_ids, inverted_words, total_documents):
    '''
    this function generates the vector of tfidf relative to the query

    input: (list of word id in the query,
            slice of inverted index needed to compute the tfidf,
            total number of documents)
    output: vector of tfidf relative to the query, it will be a dictionary
            of the form {word1 : tfidf1query, word2 : tfidf2query, ...}
    '''
    # total number of words in the query
    words_count = len(query_ids)
    # occurrences of every distinct word in the query
    occurrences = Counter(query_ids)
    # tfidf = idf(word) * tf(word, query)
    return {
        word: np.log10(total_documents / len(inverted_words[word])) * count / words_count
        for word, count in occurrences.items()
    }
###############################################################################################
# #
# functions used to intersect two or more elements of the inverted index #
# #
###############################################################################################
def ranked_intersection_pointers(inverted_words):
    '''
    computes the intersection on the elements of the inverted index
    and collects the tfidf of every matching (document, word) pair

    NOTE: this function assumes that there exists a 'universal' document indexed by 0
          so that the intersection will never be empty
          and we won't have to do several checks on the list lengths

    input: dictionary of lists of (document_id, tfidf) ordered by the first element
    output: the tfidf partial vector of the documents in the input dictionary
            it will be a dictionary of the form
            {document1 : {word1 : tfidf11, word2 : tfidf21, ...}, document2 : {word1 : tfidf12, word2 : tfidf22, ...}, ...}
            where word is a word that is both in the query and in the document

    NOTE: this algorithm compares the last element of every list instead of the first
          so that the last element (which is the first of every list) will always be a match
    '''
    # retrieving word list
    words = list(inverted_words.keys())
    # setting total number of lists to intersect
    number_of_words = len(words)
    # defining the pointers dictionary
    pointers = {}
    # setting the pointers to the last element of every list
    for word in words:
        pointers[word] = len(inverted_words[word]) - 1
    # initializing the output vectors as a dictionary with
    # an empty dictionary as default value
    document_vectors = defaultdict(dict)
    # the cycle will stop when the first list has been scanned completely;
    # because the lists have the 0 document in common, the pointers
    # become negative all at the same time
    while( pointers[words[0]] >= 0):
        current_element = inverted_words[words[0]][ pointers[words[0]] ][0]  # we always start comparing the pointed element of the first list
        j = 1                                                                # with the pointed element of the second list
        while( j < number_of_words):  # this cycle only ends when a common element is found
                                      # thus the need for the common 0 element
            # if the pointed doc_id of this list is smaller than the current element,
            # decrement all the previous lists' pointers to match this one
            # and restart the cycle from the second list
            if current_element > inverted_words[words[j]][ pointers[words[j]] ][0]:
                current_element = ranked_decrement_some_pointers(inverted_words, words, j, pointers)
                j = 1
            # if the pointed doc_id of this list is bigger than the current element,
            # decrement the pointer of the current list; if after decrementing the
            # pointed element coincides with the current element go on with the cycle,
            # otherwise repeat this cycle and fall back in the previous if case
            elif current_element < inverted_words[words[j]][ pointers[words[j]] ][0]:
                j += ranked_decrement_one_pointers(inverted_words[words[j]], current_element, pointers, words[j])
            # if the pointed doc_id of this list is equal to the current element,
            # go on with the cycle
            else:
                j += 1
        # I arrive here only if current_element is in every list:
        # collect the tfidf of every query word for the matched document
        for word in words:
            document_vectors[ current_element ][word] = inverted_words[word][ pointers[word] ][1]
        ranked_decrement_all_pointers(pointers, words)  # and remove the match from every list
    # removing the dummy 0 anime document
    document_vectors.pop(0)
    return (document_vectors)
def ranked_decrement_one_pointers(inverted_word, document, pointers, idx):
    '''
    Walk one inverted list's pointer backwards until the pointed doc_id is
    no longer greater than document, then report whether document was found.
    input: (list element of the inverted index, integer doc_id,
            dictionary of pointers, key identifying the list)
    output: 1 if document is present in the list, 0 otherwise
    side effect: pointers[idx] is decremented until
                 inverted_word[pointers[idx]][0] <= document
    '''
    # skip every entry whose doc_id exceeds the target document
    while inverted_word[pointers[idx]][0] > document:
        pointers[idx] -= 1
    # exact hit means the caller can continue scanning from this position
    return 1 if inverted_word[pointers[idx]][0] == document else 0
def ranked_decrement_some_pointers(inverted_words, words, j, pointers):
    '''
    Align the pointers of the first j inverted lists with the element
    currently pointed in the j-th list.
    input: (subdictionary of the inverted index, keys of the inverted index,
            integer index, dictionary of pointers)
    output: the greatest doc_id of inverted_words[words[0]] that is
            smaller or equal than inverted_words[words[j]][ pointers[words[j]] ]
    side effect: the pointers of lists 0..j-1 are decremented until every
                 pointed element is <= the j-th pointed element
    '''
    # doc_id the preceding lists must catch up with
    target = inverted_words[words[j]][pointers[words[j]]][0]
    for word in words[:j]:
        # decrement pointers[word] until inverted_words[word][pointers[word]] <= target
        ranked_decrement_one_pointers(inverted_words[word], target, pointers, word)
    return inverted_words[words[0]][pointers[words[0]]][0]
def ranked_decrement_all_pointers(pointers, words):
    '''
    Move every listed pointer back by one position.
    input: (dictionary of pointers, iterable of keys)
    output: None
    side effect: pointers[word] is decremented by 1 for every word in words
    '''
    for word in words:
        pointers[word] -= 1
###############################################################################################
# #
# functions used to initialize the ranked search engine and perform the actual search #
# #
###############################################################################################
def ranked_search_engine_initialization():
    '''
    Initialize the ranked search engine by loading everything it needs
    from disk.
    input: None
    output: (vocabulary, inverted index, tfidf vectors, total number of documents)
    '''
    # retrieve, in order: the vocabulary, the ranked inverted index,
    # the tfidf vectors and the total number of anime stored on disk
    return (
        vocabulary_retrieval(),
        ranked_inverted_index_retrieval(),
        tfidf_vector_retrieval(),
        anime_count(),
    )
def ranked_search(vocabulary, inverted_index, tfidf_dict, total_documents, k = 5):
    '''
    The actual ranked search engine: given a query, it prints some brief
    information about the k anime most similar to the query.
    input: (vocabulary of words, inverted index, tfidf vectors,
            total number of documents, number of search matches to display)
    output: None
    non-explicit input: the function will ask for a search query
    non-explicit output: the function will print some brief information
                         about the anime that matches the query
    '''
    # grab and normalize the user's query
    raw_query = input('Input a query:')
    print('')
    processed_query = preprocessing(raw_query)
    # map the query words onto their vocabulary ids
    query_ids = vocabulary_conversion(processed_query, vocabulary)
    # collect the inverted list of every query word
    inverted_words = {word_id: inverted_index[word_id] for word_id in query_ids}
    if not inverted_words:
        # none of the query words is present in our vocabulary
        print("Couldn't find the query in the document. Try changing the terms or using less words.")
        return
    # tfidf vector of the query itself
    vectorized_query = vectorize_query(query_ids, inverted_words, total_documents)
    # tfidf vectors of the documents that share words with the query
    vectorized_documents = ranked_intersection_pointers(inverted_words)
    # keep only the k documents most similar to the query
    top_k = find_top_k(tfidf_dict, vectorized_documents, vectorized_query, k)
    # printing search results
    ranked_search_result_printing(top_k, 'animeTitle', 'animeDescription', 'Url')
def find_top_k(tfidf_dict, vec_documents, vec_query, k):
    '''
    Select the k documents most similar to the query, maintaining a
    min-heap (heapq) of at most k entries.
    input: (dictionary of full tfidf document vectors,
            partial tfidf document vectors restricted to the query words,
            tfidf vector of the query, k)
    output: list of tuples of the form (cosine_similarity, document_id)
    '''
    heap = []
    for doc_id, partial_vec in vec_documents.items():
        # similarity between this candidate document and the query
        score = cosine_similarity(tfidf_dict[doc_id], vec_query, partial_vec)
        heapq.heappush(heap, (score, doc_id))
        # once more than k entries are stored, drop the least similar one
        if len(heap) > k:
            heapq.heappop(heap)
    return heap
def cosine_similarity(complete_vec_doc, vec_query, vec_doc):
    '''
    NOTE: the argument order IS important, because the norm of the
    query is deliberately left out of this calculation
    NOTE: since the query norm is dropped, the returned value is the true
    cosine similarity only up to a constant factor, which is enough to
    preserve the ordering relationship between the similarities
    input: (total document vector, query vector, intersection document vector)
    output: cosine similarity between the two input
            (modulo the constant factor of the query norm)
    '''
    # norm is computed on the full document vector, not only the matched words
    doc_norm = np.linalg.norm(list(complete_vec_doc.values()))
    # dot product restricted to the words appearing in the query
    dot_product = sum(vec_query[word] * vec_doc[word] for word in vec_query)
    return dot_product / doc_norm
def ranked_search_result_printing(top_k, *columns):
    '''
    Display the requested columns for every anime in top_k, sorted by
    decreasing similarity with the query.
    input: (list of tuples of the form (similarity, document_id),
            column informations to display)
    output: None
    non-explicit output: renders the search results as an HTML table
    '''
    # dataframe collecting the requested columns plus the similarity score
    results_df = pd.DataFrame(columns = columns + ('Similarity',))
    for similarity, anime_id in top_k:
        # fetch the requested fields for this anime and append its score
        results_df.loc[anime_id] = anime_information_retrieval(anime_id, columns) + [similarity]
    # most similar anime first
    results_df = results_df.sort_values(by='Similarity', ascending=False)
    display(HTML(results_df.to_html(index = False)))
import os
import shutil
import tempfile
import numpy as np
import copy
import pytest
import unittest
import tables
import asdf
from astropy.tests.helper import remote_data
from astropy import constants as const
from astropy.table import Table
from astropy.io import fits
from beast.physicsmodel.stars.isochrone import ezIsoch
from beast.physicsmodel.grid import SpectralGrid, SEDGrid
from beast.physicsmodel.model_grid import (
make_iso_table,
make_spectral_grid,
add_stellar_priors,
make_extinguished_sed_grid,
)
from beast.observationmodel.noisemodel import generic_noisemodel as noisemodel
from beast.observationmodel.ast import make_ast_input_list
from beast.observationmodel.observations import Observations, gen_SimObs_from_sedgrid
from beast.fitting.trim_grid import trim_models
from beast.fitting import fit
from beast.tools import (
get_libfiles,
beast_settings,
calc_depth_from_completeness,
subgridding_tools,
star_type_probability,
)
from beast.tools.read_beast_data import (
read_lnp_data,
read_noise_data,
read_sed_data,
get_lnp_grid_vals,
)
from beast.tools.compare_spec_type import compare_spec_type
from beast.tools.run import create_physicsmodel, create_obsmodel
from beast.tests.helpers import (
download_rename,
compare_hdf5,
compare_tables,
compare_fits,
)
@remote_data
class TestRegressionSuite(unittest.TestCase):
"""
The regression tests are done in a class to so that files are only
downloaded once and can be used by multiple tests.
"""
@classmethod
def setUpClass(cls):
    """
    Download every cached input/output file once for the whole suite and
    build the shared beast_settings objects used by the individual tests.
    """
    # download the BEAST library files
    get_libfiles.get_libfiles()
    # dataset selector; only "metal" filenames are defined here
    cls.dset = "metal"
    if cls.dset == "metal":
        cls.basesubdir = "metal_small_16Apr21/"
        cls.basename = f"{cls.basesubdir}beast_metal_small"
        cls.obsname = f"{cls.basesubdir}14675_LMC-13361nw-11112.gst_samp.fits"
        cls.astname = f"{cls.basesubdir}14675_LMC-13361nw-11112.gst.fake.fits"
    # download the cached version for use and comparison
    # - photometry and ASTs
    cls.obs_fname_cache = download_rename(cls.obsname)
    cls.asts_fname_cache = download_rename(cls.astname)
    # - isochrones
    cls.iso_fname_cache = download_rename(f"{cls.basename}_iso.csv")
    # - spectra
    cls.spec_fname_cache = download_rename(f"{cls.basename}_spec_grid.hd5")
    # - spectra with priors (full grid and the two cached subgrids)
    cls.priors_fname_cache = download_rename(
        f"{cls.basename}_spec_w_priors.grid.hd5"
    )
    cls.priors_sub0_fname_cache = download_rename(
        f"{cls.basename}_subgrids_spec_w_priors.gridsub0.hd5"
    )
    cls.priors_sub1_fname_cache = download_rename(
        f"{cls.basename}_subgrids_spec_w_priors.gridsub1.hd5"
    )
    # - SED grids (full grid and the two cached subgrids)
    cls.seds_fname_cache = download_rename(f"{cls.basename}_seds.grid.hd5")
    cls.seds_sub0_fname_cache = download_rename(
        f"{cls.basename}_subgrids_seds.gridsub0.hd5"
    )
    cls.seds_sub1_fname_cache = download_rename(
        f"{cls.basename}_subgrids_seds.gridsub1.hd5"
    )
    # - noise model (full grid and the two cached subgrids)
    cls.noise_fname_cache = download_rename(f"{cls.basename}_noisemodel.grid.hd5")
    cls.noise_sub0_fname_cache = download_rename(
        f"{cls.basename}_subgrids_noisemodel.gridsub0.hd5"
    )
    cls.noise_sub1_fname_cache = download_rename(
        f"{cls.basename}_subgrids_noisemodel.gridsub1.hd5"
    )
    # - trimmed files
    cls.noise_trim_fname_cache = download_rename(
        f"{cls.basename}_noisemodel_trim.grid.hd5"
    )
    cls.seds_trim_fname_cache = download_rename(
        f"{cls.basename}_seds_trim.grid.hd5"
    )
    # - output files
    cls.stats_fname_cache = download_rename(f"{cls.basename}_stats.fits")
    cls.lnp_fname_cache = download_rename(f"{cls.basename}_lnp.hd5")
    cls.pdf1d_fname_cache = download_rename(f"{cls.basename}_pdf1d.fits")
    cls.pdf2d_fname_cache = download_rename(f"{cls.basename}_pdf2d.fits")
    # create the beast_settings object
    # (copied over from the metal_small example in beast-examples)
    cls.settings_fname_cache = download_rename(
        f"{cls.basesubdir}beast_settings.txt"
    )
    cls.settings = beast_settings.beast_settings(cls.settings_fname_cache)
    # update names of photometry and AST files to the downloaded copies
    cls.settings.obsfile = cls.obs_fname_cache
    cls.settings.astfile = cls.asts_fname_cache
    # also make a version with 2 subgrids for the subgridding tests
    cls.settings_sg = copy.deepcopy(cls.settings)
    cls.settings_sg.n_subgrid = 2
    cls.settings_sg.project = f"{cls.settings.project}_subgrids"
# ###################################################################
# Standard BEAST fitting steps
def test_padova_isochrone_download(self):
    """
    Generate the padova isochrone table and compare the result to a cached version.
    """
    # fetch the isochrones live from the website into throwaway temp files
    iso_savename = tempfile.NamedTemporaryFile(suffix=".csv").name
    iso_infoname = tempfile.NamedTemporaryFile(suffix=".asdf").name
    (iso_fname, g) = make_iso_table(
        "test",
        iso_fname=iso_savename,
        logtmin=self.settings.logt[0],
        logtmax=self.settings.logt[1],
        dlogt=self.settings.logt[2],
        z=self.settings.z,
        info_fname=iso_infoname,
    )
    # read the cached and the freshly generated tables with identical options
    read_kwargs = {"format": "ascii.csv", "comment": "#", "delimiter": ","}
    table_cache = Table.read(self.iso_fname_cache, **read_kwargs)
    table_new = Table.read(iso_fname, **read_kwargs)
    # the new table must match the cached one
    compare_tables(table_cache, table_new)
def test_make_kurucz_tlusty_spectral_grid(self):
    """
    Generate the spectral grid based on Kurucz and Tlusty stellar atmosphere
    models based on a cached set of isochrones and compare the result to a cached
    version.
    """
    # read in the cached isochrones
    oiso = ezIsoch(self.iso_fname_cache)
    # calculate the redshift from the survey velocity
    redshift = (self.settings.velocity / const.c).decompose().value
    # make the spectral grid in a throwaway temp file
    spec_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name
    (spec_fname, g) = make_spectral_grid(
        "test",
        oiso,
        osl=self.settings.osl,
        redshift=redshift,
        distance=self.settings.distances,
        distance_unit=self.settings.distance_unit,
        spec_fname=spec_fname,
        # filterLib=filter_fname,
        extLaw=self.settings.extLaw,
        add_spectral_properties_kwargs=self.settings.add_spectral_properties_kwargs,
    )
    # compare the new to the cached version
    compare_hdf5(self.spec_fname_cache, spec_fname)
def test_add_stellar_priors_to_spectral_grid(self):
    """
    Add the stellar priors to a cached spectral grid and compare
    it to the cached version.
    """
    # load the cached spectral grid fully into memory
    specgrid = SpectralGrid(self.spec_fname_cache, backend="memory")
    # throwaway temp files for the new grid and its info
    priors_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name
    infoname = tempfile.NamedTemporaryFile(suffix=".asdf").name
    priors_fname, g = add_stellar_priors(
        "test",
        specgrid,
        priors_fname=priors_fname,
        age_prior_model=self.settings.age_prior_model,
        mass_prior_model=self.settings.mass_prior_model,
        met_prior_model=self.settings.met_prior_model,
        distance_prior_model=self.settings.distance_prior_model,
        info_fname=infoname,
    )
    # compare the new to the cached version
    compare_hdf5(self.priors_fname_cache, priors_fname)
def test_make_extinguished_sed_grid(self):
    """
    Generate the extinguished SED grid using a cached version of the
    spectral grid with priors and compare the result to a cached version.
    """
    g_pspec = SpectralGrid(self.priors_fname_cache, backend="memory")
    # generate the SED grid by integrating the filter response functions
    # effect of dust extinction applied before filter integration
    # also computes the dust priors as weights
    seds_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name
    infoname = tempfile.NamedTemporaryFile(suffix=".asdf").name
    (seds_fname, g) = make_extinguished_sed_grid(
        "test",
        g_pspec,
        self.settings.filters,
        seds_fname=seds_fname,
        extLaw=self.settings.extLaw,
        av=self.settings.avs,
        rv=self.settings.rvs,
        fA=self.settings.fAs,
        rv_prior_model=self.settings.rv_prior_model,
        av_prior_model=self.settings.av_prior_model,
        fA_prior_model=self.settings.fA_prior_model,
        add_spectral_properties_kwargs=self.settings.add_spectral_properties_kwargs,
        info_fname=infoname,
    )
    # compare the new to the cached version
    compare_hdf5(self.seds_fname_cache, seds_fname)
def test_toothpick_noisemodel(self):
    """
    Generate the noisemodel (aka observationmodel) using a cached version of
    the artificial star test results (ASTs) and compare the result to a cached
    version.
    """
    # get the modelsedgrid on which to generate the noisemodel
    modelsedgrid = SEDGrid(self.seds_fname_cache)
    # generate the AST noise model into a throwaway temp file
    noise_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name
    noisemodel.make_toothpick_noise_model(
        noise_fname,
        self.asts_fname_cache,
        modelsedgrid,
        absflux_a_matrix=self.settings.absflux_a_matrix,
    )
    # compare the new to the cached version
    compare_hdf5(self.noise_fname_cache, noise_fname)
def test_trim_grid(self):
    """
    Trim the sed grid and noise model using cached versions of
    both and compare the result to a cached version.
    """
    # read in the observed data
    obsdata = Observations(
        self.obs_fname_cache, self.settings.filters, self.settings.obs_colnames
    )
    # get the modelsedgrid
    modelsedgrid = SEDGrid(self.seds_fname_cache)
    # read in the cached noise model
    noisemodel_vals = noisemodel.get_noisemodelcat(self.noise_fname_cache)
    # trim the model sedgrid, writing to throwaway temp files
    seds_trim_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name
    noise_trim_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name
    trim_models(
        modelsedgrid,
        noisemodel_vals,
        obsdata,
        seds_trim_fname,
        noise_trim_fname,
        sigma_fac=3.0,
    )
    # compare the new to the cached version
    compare_hdf5(self.seds_trim_fname_cache, seds_trim_fname, ctype="seds")
    compare_hdf5(self.noise_trim_fname_cache, noise_trim_fname, ctype="noise")
def test_fit_grid(self):
    """
    Fit a cached version of the observations with cached version of the
    trimmed sed grid and noisemodel and compare the result to cached
    versions of the stats and pdf1d files.
    """
    # read in the AST noise model
    noisemodel_vals = noisemodel.get_noisemodelcat(self.noise_trim_fname_cache)
    # read in the observed data
    obsdata = Observations(
        self.obs_fname_cache, self.settings.filters, self.settings.obs_colnames
    )
    # output files (throwaway temp names)
    stats_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
    pdf1d_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
    pdf2d_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
    lnp_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name
    fit.summary_table_memory(
        obsdata,
        noisemodel_vals,
        self.seds_trim_fname_cache,
        threshold=-10.0,
        save_every_npts=100,
        lnp_npts=500,
        max_nbins=200,
        stats_outname=stats_fname,
        pdf1d_outname=pdf1d_fname,
        pdf2d_outname=pdf2d_fname,
        pdf2d_param_list=["Av", "M_ini", "logT"],
        lnp_outname=lnp_fname,
        surveyname=self.settings.surveyname,
    )
    # check that the stats files are exactly the same
    table_cache = Table.read(self.stats_fname_cache)
    table_new = Table.read(stats_fname)
    compare_tables(table_cache, table_new)
    # lnp files not checked as they are randomly sparsely sampled
    # hence will be different every time the fitting is run
    # check that the pdf1d/pdf2d files are exactly the same
    compare_fits(self.pdf1d_fname_cache, pdf1d_fname)
    compare_fits(self.pdf2d_fname_cache, pdf2d_fname)
# ###################################################################
# AST tests
@pytest.mark.skip(
    reason="need filters info: get from sed grid - will have to download"
)
def test_ast_pick_models(self):
    """
    Generate the artificial star test (AST) inputs using a cached version of
    the sed grid and compare the result to a cached version.
    """
    # download files specific to this test
    cached_table_filename = download_rename("phat_small/cache_inputAST.txt")
    mag_cuts = [1.0]
    # the cached AST inputs were generated from the phat grid, so fetch
    # that grid when the suite is running on a different dataset
    # (the original code had a dead `seds_fname = self.seds_fname_cache`
    # assignment here that was immediately overwritten in both branches)
    if self.dset != "phat":
        seds_fname = download_rename("phat_small/beast_example_phat_seds.grid.hd5")
    else:
        seds_fname = self.seds_fname_cache
    # generate the AST inputs into a throwaway temp file
    outname = tempfile.NamedTemporaryFile(suffix=".txt").name
    make_ast_input_list.pick_models(
        seds_fname, self.settings.filters, mag_cuts, outfile=outname, ranseed=1234,
    )
    table_new = Table.read(outname, format="ascii")
    # download cached version of the file and compare it to new file
    table_cache = Table.read(cached_table_filename, format="csv", delimiter=" ")
    compare_tables(table_new, table_cache)
# ###################################################################
# simulation tests
@pytest.mark.skip(reason="updated cached file needed")
def test_simobs(self):
    """
    Simulate observations using cached versions of the sed grid and noise model
    and compare the result to a cached version.
    """
    # cached simulated observations to compare against
    simobs_fname_cache = download_rename("beast_example_phat_simobs.fits")
    # physics model grid (includes priors) and the corresponding noise
    # model (includes bias, unc, and completeness)
    modelsedgrid = SEDGrid(self.seds_fname_cache)
    noisegrid = noisemodel.get_noisemodelcat(self.noise_fname_cache)
    table_new = gen_SimObs_from_sedgrid(
        modelsedgrid, noisegrid, nsim=100, compl_filter="max", ranseed=1234,
    )
    table_cache = Table.read(simobs_fname_cache)
    # normalize every column name to uppercase so the comparison is not
    # sensitive to case differences between the two tables
    for table in (table_new, table_cache):
        for col in table.colnames:
            table[col].name = col.upper()
    # check that the simobs files are exactly the same
    compare_tables(table_cache, table_new)
# ###################################################################
# tools tests
@pytest.mark.skip(reason="not working")
def test_read_lnp_data(self):
    """
    Read in the lnp data from a cached file and test that selected values
    are as expected.
    """
    ldata = read_lnp_data(self.lnp_fname_cache)
    # only these two datasets should be present in the lnp file
    for ckey in ldata.keys():
        assert ckey in ["vals", "indxs"], f"{ckey} not in lnp data expected keys"
    # reference values for a single model (cached current values 20 Apr 2020)
    # fmt: off
    exp_vals = [-56.83604431, -76.34762573, -17.55770874, -18.23323059, -10.53744507]
    exp_indxs = [14639., 15015., 296., 12636., 1336.]
    # fmt: on
    np.testing.assert_allclose(
        ldata["vals"][0][0:5],
        exp_vals,
        err_msg="Expected posterior (vals) values not correct",
    )
    np.testing.assert_allclose(
        ldata["indxs"][0][0:5],
        exp_indxs,
        err_msg="Expected index values not correct",
    )
def test_read_noise_data(self):
    """
    Read in the noise model from a cached file and test that selected values
    are as expected.
    """
    ndata = read_noise_data(self.noise_trim_fname_cache)
    # only these three datasets should be present in the noise model
    for ckey in ndata.keys():
        assert ckey in ["bias", "completeness", "error"], f"{ckey} not in noise data expected keys"
    # bias/error of the cached trimmed model are essentially zero and
    # completeness is a fraction; check each dataset against its bounds
    checks = (
        ("bias", -1e-10, 1e-10, "bias values not between -1e-10 and 1e-10"),
        ("error", -1e-10, 1e-10, "error values not between -1e-10 and 1e-10"),
        ("completeness", 0.0, 1.0, "completeness values not between 0 and 1"),
    )
    for ckey, lo, hi, msg in checks:
        assert np.all((ndata[ckey] >= lo) & (ndata[ckey] <= hi)), msg
@pytest.mark.skip(reason="updated cached values needed")
def test_read_sed_data(self):
    """
    Read in the sed grid from a cached file and test that selected values
    are as expected.
    """
    requested_params = ["Av", "Rv", "f_A", "M_ini", "logA", "Z", "distance"]
    # check that when return_params=True, then just a list of parameters is returned
    sparams = read_sed_data(self.seds_trim_fname_cache, return_params=True)
    assert isinstance(sparams, list), "Returned params are not a list"
    # the grid must expose the requested parameters plus the seds themselves
    checknames = requested_params + ["seds", "lamb"]
    for cname in checknames:
        assert cname in sparams, f"{cname} not in sed parameter list"
    # check that otherwise, the requested sed data is returned
    sdata = read_sed_data(self.seds_trim_fname_cache, param_list=requested_params)
    # reference values for grid entry 10 of the cached grid
    expected_values = {
        "Av": 0.0,
        "Rv": 2.0,
        "f_A": 1.0,
        "M_ini": 4.0073261261,
        "logA": 6.0,
        "Z": 0.008,
        "distance": 783429.642766212,
    }
    for cname in requested_params:
        assert cname in sdata.keys(), f"requsted parameter {cname} not in sed data"
        np.testing.assert_allclose(
            sdata[cname][10],
            expected_values[cname],
            err_msg=f"expected value of {cname} is not found",
        )
@pytest.mark.skip(reason="updated cached values needed")
def test_get_lnp_grid_vals(self):
    """
    Read in the lnp and sed grid data from cached files and test that
    selected values are as expected.
    """
    ldata = read_lnp_data(self.lnp_fname_cache)
    requested_params = ["Av", "Rv", "f_A", "M_ini", "logA", "Z", "distance"]
    sdata = read_sed_data(self.seds_trim_fname_cache, param_list=requested_params)
    # combine the lnp indices with the sed grid parameter values
    lgvals_data = get_lnp_grid_vals(sdata, ldata)
    # reference values for the first 5 entries of grid column 10
    expected_values = {
        "Av": [0.0, 0.0, 0.0, 0.0, 0.0],
        "Rv": [2.0, 2.0, 2.0, 2.0, 2.0],
        "f_A": [1.0, 1.0, 1.0, 1.0, 1.0],
        "M_ini": [3.89416909, 3.92726111, 3.95603228, 2.04966068, 2.04999995],
        "logA": [6.0, 6.0, 6.0, 9.0, 9.0],
        "Z": [0.03, 0.03, 0.03, 0.004, 0.004],
        "distance": [
            783429.64276621,
            783429.64276621,
            783429.64276621,
            783429.64276621,
            783429.64276621,
        ],
    }
    for cname in requested_params:
        assert (
            cname in lgvals_data.keys()
        ), f"requsted parameter {cname} not in sed data"
        np.testing.assert_allclose(
            lgvals_data[cname][0:5, 10],
            expected_values[cname],
            err_msg=f"expected value of {cname} is not found",
        )
@pytest.mark.skip(reason="failing, not sure why")
def test_split_grid(self):
    """
    Split a cached version of a sed grid into a few different
    numbers of subgrids and check the splits are as expected.
    """
    split_and_check(self.seds_trim_fname_cache, 4)  # an even number
    split_and_check(self.seds_trim_fname_cache, 3)  # an odd number
    split_and_check(self.seds_trim_fname_cache, 1)  # an edge case (no real split)
def test_reduce_grid_info(self):
    """
    Split a cached sed grid into subgrids and check that the grid info
    reduced over the subgrids reproduces the min/max of every quantity
    of the full grid while capping the number of unique values.
    """
    sub_fnames = subgridding_tools.split_grid(self.seds_trim_fname_cache, 3)
    complete_g_info = subgridding_tools.subgrid_info(self.seds_trim_fname_cache)
    cap_unique = 50
    sub_g_info = subgridding_tools.reduce_grid_info(
        sub_fnames, nprocs=3, cap_unique=cap_unique
    )
    for q in complete_g_info:
        # every quantity of the full grid must survive the reduction
        if q not in sub_g_info:
            raise AssertionError()
        # the overall min/max must be preserved exactly
        if not complete_g_info[q]["min"] == sub_g_info[q]["min"]:
            raise AssertionError()
        if not complete_g_info[q]["max"] == sub_g_info[q]["max"]:
            raise AssertionError()
        num_unique = len(complete_g_info[q]["unique"])
        if num_unique > cap_unique:
            # Can still be larger if one of the sub results during the
            # reduction is larger. This is as intended.
            if not sub_g_info[q]["num_unique"] >= cap_unique:
                raise AssertionError()
        else:
            if not sub_g_info[q]["num_unique"] == num_unique:
                raise AssertionError()
@pytest.mark.skip(reason="failing, not sure why")
def test_merge_pdf1d_stats(self):
    """
    Using cached versions of the observations, sed grid, and noise model,
    split the grids and do the fitting on the subgrids and original
    grid. Merge the results from the subgrids and compare to the results
    from fitting the full grid.
    """
    ######################################
    # STEP 1: GET SOME DATA TO WORK WITH #
    ######################################
    # read in the observed data
    obsdata = Observations(
        self.obs_fname_cache, self.settings.filters, self.settings.obs_colnames
    )
    #########################################################################################
    # STEP 2: SPLIT THE GRIDS AND GENERATE THE GRID INFO DICT AS IN THE SUBGRIDDING EXAMPLE #
    #########################################################################################
    num_subgrids = 3
    # Split SED grid
    sub_seds_trim_fnames = subgridding_tools.split_grid(
        self.seds_trim_fname_cache, num_subgrids, overwrite=True
    )
    # Split noise grid (a standardized function does not exist)
    sub_noise_trim_fnames = []
    noisemodel_vals = noisemodel.get_noisemodelcat(self.noise_trim_fname_cache)
    slices = subgridding_tools.uniform_slices(
        len(noisemodel_vals["bias"]), num_subgrids
    )
    for i, slc in enumerate(slices):
        # write each noise-model slice to its own sub file
        outname = self.noise_trim_fname_cache.replace(".hd5", "sub{}.hd5".format(i))
        with tables.open_file(outname, "w") as outfile:
            outfile.create_array(outfile.root, "bias", noisemodel_vals["bias"][slc])
            outfile.create_array(
                outfile.root, "error", noisemodel_vals["error"][slc]
            )
            outfile.create_array(
                outfile.root, "completeness", noisemodel_vals["completeness"][slc]
            )
        sub_noise_trim_fnames.append(outname)
    # Collect information about the parameter ranges, to make the pdf1d bins
    # consistent between subgrids
    grid_info_dict = subgridding_tools.reduce_grid_info(
        sub_seds_trim_fnames, sub_noise_trim_fnames, nprocs=1, cap_unique=100
    )
    ##################################################
    # STEP 3: GENERATE FILENAMES AND RUN THE FITTING #
    ##################################################
    def make_gridsub_fnames(base_fname, num_subgrids, extension=".fits"):
        # insert a gridsub{i} tag before the extension, one name per subgrid
        return [
            base_fname.replace(extension, "gridsub{}{}".format(i, extension))
            for i in range(num_subgrids)
        ]
    stats_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
    pdf1d_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
    lnp_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name
    subgrid_pdf1d_fnames = make_gridsub_fnames(pdf1d_fname, num_subgrids)
    subgrid_stats_fnames = make_gridsub_fnames(stats_fname, num_subgrids)
    subgrid_lnp_fnames = make_gridsub_fnames(
        lnp_fname, num_subgrids, extension=".hd5"
    )
    # fit each subgrid separately
    for i in range(num_subgrids):
        sub_noisemodel_vals = noisemodel.get_noisemodelcat(sub_noise_trim_fnames[i])
        fit.summary_table_memory(
            obsdata,
            sub_noisemodel_vals,
            sub_seds_trim_fnames[i],
            threshold=-40.0,
            save_every_npts=100,
            lnp_npts=500,
            stats_outname=subgrid_stats_fnames[i],
            pdf1d_outname=subgrid_pdf1d_fnames[i],
            lnp_outname=subgrid_lnp_fnames[i],
            grid_info_dict=grid_info_dict,
            do_not_normalize=True,
        )
        # The do_not_normalize option is absolutely crucial!
    # Now merge the results
    merged_pdf1d_fname, merged_stats_fname = subgridding_tools.merge_pdf1d_stats(
        subgrid_pdf1d_fnames, subgrid_stats_fnames
    )
    # Do a full fit also
    normal_stats = tempfile.NamedTemporaryFile(suffix=".fits").name
    normal_pdf1d = tempfile.NamedTemporaryFile(suffix=".fits").name
    normal_lnp = tempfile.NamedTemporaryFile(suffix=".hd5").name
    fit.summary_table_memory(
        obsdata,
        noisemodel_vals,
        self.seds_trim_fname_cache,
        threshold=-40.0,
        save_every_npts=100,
        lnp_npts=500,
        stats_outname=normal_stats,
        pdf1d_outname=normal_pdf1d,
        lnp_outname=normal_lnp,
        do_not_normalize=True,
    )
    # Here, we also need to use do_not_normalize, otherwise Pmax will be
    # different by a factor
    # CHECKS
    tolerance = 1e-6
    fits_normal = fits.open(normal_pdf1d)
    fits_new = fits.open(merged_pdf1d_fname)
    # merged pdf1d must have the same number of extensions as the full fit
    if not len(fits_new) == len(fits_normal):
        raise AssertionError()
    # A similar problem to the above will also occur here
    for k in range(1, len(fits_new)):
        qname = fits_new[k].header["EXTNAME"]
        np.testing.assert_allclose(
            fits_new[k].data,
            fits_normal[qname].data,
            rtol=tolerance,
            atol=tolerance,
        )
    table_normal = Table.read(normal_stats)
    table_new = Table.read(merged_stats_fname)
    if not len(table_normal) == len(table_new):
        raise AssertionError()
    # These will normally fail, as the merging process can not be made
    # bit-correct due to floating point math (exacerbated by exponentials)
    for c in table_new.colnames:
        if c == "Name" or c == "RA" or c == "DEC":
            np.testing.assert_equal(
                table_normal[c],
                table_new[c],
                err_msg="column {} is not equal".format(c),
            )
        else:
            np.testing.assert_allclose(
                table_normal[c],
                table_new[c],
                rtol=tolerance,
                equal_nan=True,
                err_msg="column {} is not close enough".format(c),
            )
def test_beast_settings(self):
    """
    Test that a given text file creates the expected beast_settings class.
    """
    # the settings parsed in setUpClass must come out as the expected class
    settings_obj = self.settings
    assert isinstance(settings_obj, beast_settings.beast_settings), "Did not produce the correct class"
def test_compare_spec_type_inFOV(self):
    """
    Test for compare_spec_type. Inputs and expected outputs created by
    running generate_files_for_tests.py in beast-examples/metal_small.
    In this version, the stars are in the imaging field of view.
    """
    # grab the cached inputs and expected outputs
    cached_fname = download_rename(f"{self.basename}_compare_spec_type.asdf")
    with asdf.open(cached_fname) as af:
        cached_info = copy.deepcopy(af.tree)
    # run compare_spec_type on the cached inputs
    spec_type = compare_spec_type(
        self.obs_fname_cache,
        self.stats_fname_cache,
        **cached_info["input"],
    )
    # the resulting table must match the cached expected output
    compare_tables(Table(cached_info["output"]), Table(spec_type), rtol=2e-3)
def test_compare_spec_type_notFOV(self):
    """
    Test for compare_spec_type. In this version, the stars are NOT in the
    imaging field of view.
    """
    # run compare_spec_type with a position far outside the imaging data
    spec_type = compare_spec_type(
        self.obs_fname_cache,
        self.stats_fname_cache,
        [1.0],  # RA
        [1.0],  # Dec
        ["B"],  # Spectral type
        [4],  # Subtype
        ["V"],  # Luminosity class
        match_radius=0.2,  # Match radius (arcsec)
    )
    # with no match, every derived column is None; only the inputs echo back
    none_cols = [
        "spec_teff", "spec_logg", "phot_cat_ind", "stats_cat_ind",
        "beast_teff_p50", "beast_teff_p16", "beast_teff_p84",
        "beast_logg_p50", "beast_logg_p16", "beast_logg_p84",
        "teff_sigma", "logg_sigma",
    ]
    expected = {"spec_ra": [1.0], "spec_dec": [1.0], "spec_type": ["B 4 V"]}
    expected.update({col: [None] for col in none_cols})
    expected_table = Table(expected)
    # compare to new table
    compare_tables(expected_table, Table(spec_type))
def test_star_type_probability_all_params(self):
    """
    Test for star_type_probability. Inputs and expected outputs created by
    running generate_files_for_tests.py in beast-examples/metal_small.
    In this version, all required parameters are present.
    """
    # cached inputs and expected outputs
    star_prob_fname = download_rename(f"{self.basename}_star_type_probability.asdf")
    with asdf.open(star_prob_fname) as af:
        star_prob_info = copy.deepcopy(af.tree)
    # compute the star type probabilities from the cached 1D/2D PDFs
    star_prob = star_type_probability.star_type_probability(
        self.pdf1d_fname_cache, self.pdf2d_fname_cache, **star_prob_info["input"],
    )
    # must reproduce the cached expected output
    compare_tables(Table(star_prob_info["output"]), Table(star_prob))
def test_star_type_probability_no_Av(self):
"""
Test for star_type_probability.
In this version, A_V was not saved in the 2D PDFs.
"""
# download cached file
star_prob_fname = download_rename(f"{self.basename}_star_type_probability.asdf")
with asdf.open(star_prob_fname) as af:
star_prob_info = copy.deepcopy(af.tree)
# edit the 2D PDF file to not have A_V info
temp_pdf2d_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
temp_hdu_list = []
with fits.open(self.pdf2d_fname_cache) as hdu:
for ext in hdu:
if "Av+" in ext.name or "+Av" in ext.name:
continue
temp_hdu_list.append(ext)
fits.HDUList(temp_hdu_list).writeto(temp_pdf2d_fname)
# edit the expected output to have NaNs in columns that require A_V
# (currently, that's all columns)
expected_star_prob = Table(star_prob_info["output"])
for col in expected_star_prob.colnames:
if col == "ext_O_star":
expected_star_prob[col] = np.nan
if col == "dusty_agb":
expected_star_prob[col] = np.nan
# run star_type_probability
star_prob = star_type_probability.star_type_probability(
self.pdf1d_fname_cache, temp_pdf2d_fname, **star_prob_info["input"],
)
# compare to expected table
compare_tables(expected_star_prob, Table(star_prob))
@pytest.mark.skip(reason="updated cached file needed")
def test_calc_depth_from_completeness(self):
"""
Test for calculate_depth.py
"""
# calculate depth for 50% and 75% completeness
depth = calc_depth_from_completeness.calc_depth(
self.seds_fname_cache,
self.noise_fname_cache,
completeness_value=[0.5, 0.75],
vega_mag=True,
)
# expected results
expected_dict = {
"HST_WFC3_F275W": [25.000309202589012, 24.80610510139205],
"HST_WFC3_F336W": [24.65974845352875, 24.338061586936263],
"HST_ACS_WFC_F475W": [np.nan, np.nan],
"HST_ACS_WFC_F814W": [np.nan, 24.368742437736692],
"HST_WFC3_F110W": [np.nan, np.nan],
"HST_WFC3_F160W": [21.99298441116123, 21.504534701422067],
}
# compare them
compare_tables(Table(expected_dict), Table(depth))
# ###################################################################
# tools.run tests
@pytest.mark.skip(reason="need to fix issue with folder teardown")
@pytest.mark.usefixtures("setup_create_physicsmodel")
def test_create_physicsmodel_no_subgrid(self):
"""
Test create_physicsmodel.py, assuming no subgrids
"""
# run create_physicsmodel
create_physicsmodel.create_physicsmodel(
self.settings, nsubs=self.settings.n_subgrid, nprocs=1
)
# check that files match
# - isochrones
table_cache = Table.read(
self.iso_fname_cache, format="ascii.csv", comment="#", delimiter=",",
)
table_new = Table.read(
f"./{self.settings.project}/{self.settings.project}_iso.csv",
format="ascii.csv",
comment="#",
delimiter=",",
)
compare_tables(table_cache, table_new)
# - spectra with priors
compare_hdf5(
self.priors_fname_cache,
f"./{self.settings.project}/{self.settings.project}_spec_w_priors.grid.hd5",
)
# - SEDs grid
compare_hdf5(
self.seds_fname_cache,
f"./{self.settings.project}/{self.settings.project}_seds.grid.hd5",
)
@pytest.mark.skip(reason="need to fix issue with folder teardown")
@pytest.mark.usefixtures("setup_create_physicsmodel")
def test_create_physicsmodel_with_subgrid(self):
"""
Test create_physicsmodel.py, assuming two subgrids
"""
# run create_physicsmodel
create_physicsmodel.create_physicsmodel(
self.settings_sg, nsubs=self.settings_sg.n_subgrid, nprocs=1
)
# check that files match
# - isochrones
table_cache = Table.read(
self.iso_fname_cache, format="ascii.csv", comment="#", delimiter=",",
)
table_new = Table.read(
"beast_metal_small_subgrids/beast_metal_small_subgrids_iso.csv",
format="ascii.csv",
comment="#",
delimiter=",",
)
compare_tables(table_cache, table_new)
# - spectra with priors
compare_hdf5(
self.priors_fname_cache,
"./beast_metal_small_subgrids/beast_metal_small_subgrids_spec_w_priors.grid.hd5",
)
compare_hdf5(
self.priors_sub0_fname_cache,
"beast_metal_small_subgrids/beast_metal_small_subgrids_spec_w_priors.gridsub0.hd5",
)
compare_hdf5(
self.priors_sub1_fname_cache,
"beast_metal_small_subgrids/beast_metal_small_subgrids_spec_w_priors.gridsub1.hd5",
)
# - SEDs grid
compare_hdf5(
self.seds_sub0_fname_cache,
"beast_metal_small_subgrids/beast_metal_small_subgrids_seds.gridsub0.hd5",
)
compare_hdf5(
self.seds_sub1_fname_cache,
"beast_metal_small_subgrids/beast_metal_small_subgrids_seds.gridsub1.hd5",
)
# - list of subgrids
with open("./beast_metal_small_subgrids/subgrid_fnames.txt") as f:
temp = f.read()
subgrid_list = [x for x in temp.split("\n") if x != ""]
expected_list = [
"beast_metal_small_subgrids/beast_metal_small_subgrids_seds.gridsub0.hd5",
"beast_metal_small_subgrids/beast_metal_small_subgrids_seds.gridsub1.hd5",
]
assert subgrid_list == expected_list, "subgrid_fnames.txt has incorrect content"
@pytest.mark.skip(reason="need to fix issue with folder teardown")
@pytest.mark.usefixtures("setup_create_obsmodel")
def test_create_obsmodel_no_subgrid(self):
"""
Test create_obsmodel.py, assuming no subgrids
"""
print("running test_create_obsmodel_no_subgrid")
# run create_obsmodel
create_obsmodel.create_obsmodel(
self.settings,
use_sd=False,
nsubs=self.settings.n_subgrid,
nprocs=1,
)
# check that files match
compare_hdf5(
self.noise_fname_cache,
"beast_metal_small/beast_metal_small_noisemodel.grid.hd5",
)
@pytest.mark.skip(reason="need to fix issue with folder teardown")
@pytest.mark.usefixtures("setup_create_obsmodel")
def test_create_obsmodel_with_subgrid(self):
"""
Test create_obsmodel.py, assuming two subgrids
"""
print("running test_create_obsmodel_with_subgrid")
# run create_obsmodel
create_obsmodel.create_obsmodel(
self.settings_sg,
use_sd=False,
nsubs=self.settings_sg.n_subgrid,
nprocs=1,
)
# check that files match
compare_hdf5(
self.noise_sub0_fname_cache,
"beast_metal_small_subgrids/beast_metal_small_subgrids_noisemodel.gridsub0.hd5",
)
compare_hdf5(
self.noise_sub1_fname_cache,
"beast_metal_small_subgrids/beast_metal_small_subgrids_noisemodel.gridsub1.hd5",
)
# ###################################################################
# specific helper functions
def split_and_check(grid_fname, num_subgrids):
    """
    Split a sed grid into subgrids and test the contents of the subgrids
    are as expected and concatenating the subgrid components (seds, grid)
    gives the full sed grid.

    Parameters
    ----------
    grid_fname : str
        filename for the sed grid

    num_subgrids : int
        number of subgrids to split the sed grid into
    """
    full_grid = SEDGrid(grid_fname)
    sub_fnames = subgridding_tools.split_grid(grid_fname, num_subgrids)

    # collect the pieces, checking each subgrid against the full grid
    seds_pieces = []
    grid_pieces = []
    for fname in sub_fnames:
        piece = SEDGrid(fname)
        seds_pieces.append(piece.seds)
        grid_pieces.append(piece.grid)
        np.testing.assert_equal(full_grid.lamb, piece.lamb)
        if full_grid.grid.colnames != piece.grid.colnames:
            raise AssertionError()

    # concatenating the pieces should reproduce the full grid exactly
    np.testing.assert_equal(np.concatenate(seds_pieces), full_grid.seds)
    np.testing.assert_equal(np.concatenate(grid_pieces), full_grid.grid)

    # the split method skips anything that already exists, so if we
    # want to use this function multiple times for the same test
    # grid, we need to do this.
    for fname in sub_fnames:
        os.remove(fname)
@pytest.fixture(scope="function")
def setup_create_physicsmodel(request):
    """
    Make sure that the folders (and their contents) from the create_physicsmodel
    tests get deleted after the tests run
    """
    # nothing to set up; hand control to the test
    yield

    # teardown: remove the project folder and its subgrid sibling
    project_dir = f"./{request.cls.settings.project}"
    for folder in (project_dir, f"{project_dir}_subgrids"):
        if os.path.isdir(folder):
            shutil.rmtree(folder)
@pytest.fixture(scope="function")
def setup_create_obsmodel(request):
    """
    Make symlink to files needed for create_obsmodel test so that they're in
    the proper folder. Delete symlinks after create_obsmodel tests have run.
    """
    # create the project folders the obsmodel code expects
    basename = f"./{request.cls.settings.project}"
    os.mkdir(basename)
    os.mkdir(f"{basename}_subgrids")

    # symlink the cached SED grids into those folders
    source_list = [
        request.cls.seds_fname_cache,
        request.cls.seds_sub0_fname_cache,
        request.cls.seds_sub1_fname_cache,
    ]
    dest_list = [
        "./beast_metal_small/beast_metal_small_seds.grid.hd5",
        "./beast_metal_small_subgrids/beast_metal_small_subgrids_seds.gridsub0.hd5",
        "./beast_metal_small_subgrids/beast_metal_small_subgrids_seds.gridsub1.hd5",
    ]
    for src, dst in zip(source_list, dest_list):
        os.symlink(os.path.abspath(src), os.path.abspath(dst))

    # record the subgrid SED file names for the obsmodel code to read
    with open("./beast_metal_small_subgrids/subgrid_fnames.txt", "w") as f:
        f.write("\n".join(dest_list[1:]) + "\n")

    # run tests
    yield

    # teardown: remove the folders (and the symlinks inside them)
    for folder in (basename, f"{basename}_subgrids"):
        if os.path.isdir(folder):
            shutil.rmtree(folder)
|
import sys, os
sys.path.append(os.path.abspath('../day 1'))
from baseclass import Solution
# imports required for solution:
class Solution_Repo(Solution):
    """Advent of Code solution: count group answers (anyone / everyone)."""

    def __init__(self):
        Solution.__init__(self)
        self.REPO_OWNER = "Akumatic"
        self.REPO_URL = "https://github.com/Akumatic/Advent-of-Code"
        self.FILENAME = "solutions/solution_3.py"

    @staticmethod
    def _read_groups() -> list:
        """Read the puzzle input and split it into answer groups.

        Groups are separated by blank lines; each group is a list of the
        per-person answer strings.
        """
        with open(os.path.dirname(__file__) + "/../input.txt", "r") as f:
            return [line.split() for line in f.read().strip().split("\n\n")]

    @staticmethod
    def _count(groups: list, everyone: bool) -> int:
        """Count answered questions over all groups.

        A question counts for a group when anyone answered it, or — if
        ``everyone`` is True — only when every person in the group did.
        """
        result = 0
        for group in groups:
            # tally of each lowercase letter 'a'-'z' within this group
            answers = {chr(c): 0 for c in range(97, 123)}
            for answer in group:
                for letter in answer:
                    answers[letter] += 1
            if everyone:
                result += sum(1 for letter in answers if answers[letter] == len(group))
            else:
                result += sum(1 for letter in answers if answers[letter])
        return result

    def part_1(self):
        """Total questions to which anyone in each group answered yes."""
        return self._count(self._read_groups(), False)

    def part_2(self):
        """Total questions to which everyone in each group answered yes."""
        return self._count(self._read_groups(), True)
import time
import string
import secrets
import requests
from rich import print
from rich.markup import escape
from rich.console import Console
from paramiko import SSHClient, AutoAddPolicy
from paramiko.ssh_exception import NoValidConnectionsError
# shared rich console for all terminal output in this script
console = Console()
def generatePassword(length) -> str:
    """Return a cryptographically random password of *length* characters
    drawn from ASCII letters, digits and punctuation."""
    alphabet = string.ascii_letters + string.digits + string.punctuation
    return "".join(secrets.choice(alphabet) for _ in range(length))
logo = """
$$\ $$\ $$\ $$$$$$\ $$$$$$\
$$ | $\ $$ |\__| $$ __$$\ $$ __$$\
$$ |$$$\ $$ |$$\ $$$$$$\ $$$$$$\ $$ / \__| $$$$$$\ $$ / \__|$$$$$$\
$$ $$ $$\$$ |$$ |$$ __$$\ $$ __$$\ \$$$$$$\ \____$$\ $$$$\ $$ __$$\
$$$$ _$$$$ |$$ |$$ | \__|$$$$$$$$ | \____$$\ $$$$$$$ |$$ _| $$$$$$$$ |
$$$ / \$$$ |$$ |$$ | $$ ____|$$\ $$ |$$ __$$ |$$ | $$ ____|
$$ / \$$ |$$ |$$ | \$$$$$$$\ \$$$$$$ |\$$$$$$$ |$$ | \$$$$$$$\
\__/ \__|\__|\__| \_______| \______/ \_______|\__| \_______|
"""
version = "1.0.0"
console.print(logo, style="magenta")
console.print("Transmit your data safely across the wire at any time, anywhere.", style="italic magenta")
def inquire(question):
    """Prompt the user on the console (input hidden) and return the answer."""
    prompt = f"[green]?[/green] [bold]{question}[/bold] "
    return console.input(prompt, password=True)
# ask for the Linode API key up front (input is hidden at the prompt)
api_key = inquire("Enter your Linode API key")
# print(api_key)
def read_stdout(stdout):
    """Stream lines from a remote command's stdout to the console as they
    arrive, stopping at EOF (empty string)."""
    line = stdout.readline()
    while line != "":
        console.print(line, end="")
        line = stdout.readline()
class LinodeSession(requests.Session):
    """requests.Session preconfigured for the Linode v4 API: sends the
    bearer-token header and prefixes every request path with the API base."""

    def __init__(self, api_key=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.headers.update({"Authorization": f"Bearer {api_key}"})

    def request(self, method, url, *args, **kwargs):
        full_url = f"https://api.linode.com/v4{url}"
        return super().request(method, full_url, *args, **kwargs)
# Provision a fresh Linode VPS and configure it as a WireGuard endpoint.
# Everything below talks to the Linode API, then to the new host over SSH,
# streaming progress into the rich status spinner.
with console.status("Provisioning server...") as status, LinodeSession(api_key=api_key) as ls:
    # pick the first advertised region and instance type
    regions = ls.get("/regions").json()
    region = regions["data"][0]
    console.print("[green]✓[/green] Available regions retrieved")
    linode_types = ls.get("/linode/types").json()
    linode_type = linode_types["data"][0]
    console.print("[green]✓[/green] Linode types retrieved")
    # random root password for the new instance
    root_pass = generatePassword(12)
    console.print(f"[green]✓[/green] Root password generated: [red]{escape(root_pass)}[/red]")
    # create a Debian 11 instance in the chosen region/type
    linode = ls.post("/linode/instances", json={
        "image": "linode/debian11",
        "region": region["id"],
        "type": linode_type["id"],
        "root_pass": root_pass
    }).json()
    console.print("[green]✓[/green] Creating Linode")
    # poll every 5s until the instance reports "running"
    while (linode_info := ls.get(f"/linode/instances/{linode['id']}").json())["status"] != "running":
        status.update(status=f"{linode_info['status'].capitalize()} server...")
        time.sleep(5)
    server_ip = linode_info["ipv4"][0]
    status.update(status="Connecting to server...")
    # SSH in as root, retrying until sshd starts accepting connections
    ssh = SSHClient()
    ssh.load_system_host_keys()
    ssh.set_missing_host_key_policy(AutoAddPolicy())
    while True:
        try:
            ssh.connect(server_ip, username="root", password=root_pass)
            break
        except NoValidConnectionsError:
            time.sleep(2)
    _, stdout, _ = ssh.exec_command("whoami")
    console.print(f"[green]✓[/green] Logged in as [green]{stdout.read().strip().decode()}[/green]")
    status.update(status="Installing wireguard on the server...")
    # _, stdout, _ = ssh.exec_command("yes | pacman -Syu")
    # read_stdout(stdout)
    _, stdout, _ = ssh.exec_command("apt update && apt install -yy wireguard iptables > /dev/null 2>&1", get_pty=True)
    # recv_exit_status blocks until the remote command finishes
    stdout.channel.recv_exit_status()
    # Server keys
    _, stdout, _ = ssh.exec_command("wg genkey", get_pty=True)
    server_priv = stdout.read().strip().decode()
    console.print(f"[green]✓[/green] Generated server private key: [red]{server_priv}[/red]")
    _, stdout, _ = ssh.exec_command(f"echo {server_priv} | wg pubkey")
    server_pub = stdout.read().strip().decode()
    console.print(f"[green]✓[/green] Generated server public key: [green]{server_pub}[/green]")
    # Client keys
    _, stdout, _ = ssh.exec_command("wg genkey")
    client_priv = stdout.read().strip().decode()
    console.print(f"[green]✓[/green] Generated client private key: [red]{client_priv}[/red]")
    _, stdout, _ = ssh.exec_command(f"echo {client_priv} | wg pubkey")
    client_pub = stdout.read().strip().decode()
    console.print(f"[green]✓[/green] Generated client public key: [green]{client_pub}[/green]")
    ssh.exec_command(f"echo 1 > /proc/sys/net/ipv4/ip_forward")
    console.print("[green]✓[/green] Enable IPv4 forwarding")
    # Write config file
    # (heredoc writes the remote wg0.conf verbatim with the keys substituted)
    _, stdout, _ = ssh.exec_command(f"""cat > /etc/wireguard/wg0.conf << 'EOF'
[Interface]
Address = 10.0.0.1/24
SaveConfig = false
PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -t nat -A POSTROUTING -o ens4 -j MASQUERADE; ip6tables -A FORWARD -i %i -j ACCEPT; ip6tables -t nat -A POSTROUTING -o ens4 -j MASQUERADE
PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -t nat -D POSTROUTING -o ens4 -j MASQUERADE; ip6tables -D FORWARD -i %i -j ACCEPT; ip6tables -t nat -D POSTROUTING -o ens4 -j MASQUERADE
ListenPort = 443
PrivateKey = {server_priv}
[Peer]
PublicKey = {client_pub}
AllowedIPs = 10.0.0.2/32
EOF""")
    stdout.channel.recv_exit_status()
    console.print("[green]✓[/green] Wireguard configured")
    status.update("Starting wireguard service...")
    _, stdout, _ = ssh.exec_command("systemctl enable wg-quick@wg0 && systemctl start wg-quick@wg0")
    stdout.channel.recv_exit_status()
    status.update("[green]✓[/green] Wireguard services successfully started")
    console.print()
    # print the ready-to-use client-side configuration for the user to copy
    console.print(f"""[cyan]Client config:[/cyan]
[Interface]
PrivateKey = {client_priv}
Address = 10.0.0.2/32
DNS = 1.1.1.1, 1.0.0.1
MTU = 1380
[Peer]
PublicKey = {server_pub}
AllowedIPs = 0.0.0.0/0
Endpoint = {server_ip}:443
PersistentKeepalive = 21
""")
|
# Time: O(r * c)
# Space: O(1)
# Py2/Py3 compatibility: ensure ``xrange`` is defined on both interpreters
try:
    xrange          # Python 2: xrange already exists
except NameError:
    xrange = range  # Python 3: alias the lazy range
class Solution(object):
    def matrixScore(self, A):
        """
        :type A: List[List[int]]
        :rtype: int

        Greedy score after flipping rows/columns of a binary matrix:
        XOR-ing each entry with its row's leading bit simulates flipping
        every row whose first bit is 0 (so column 0 becomes all ones);
        then each column contributes max(ones, R - ones) * 2^(C-1-c),
        i.e. it is flipped whenever that increases its count of ones.

        Uses ``range`` directly (Python 3 idiom) instead of depending on
        the module-level ``xrange`` compatibility shim.
        """
        R, C = len(A), len(A[0])
        result = 0
        for c in range(C):
            # ones in column c after the implicit row flips
            ones = sum(A[r][c] ^ A[r][0] for r in range(R))
            result += max(ones, R - ones) * 2 ** (C - 1 - c)
        return result
|
#!/usr/bin/env python
import RPi.GPIO as GPIO
import time
# standard analog-servo PWM frequency in Hz (20 ms period)
SERVO_FREQUENCY = 50
class Servo:
    """Drive a hobby servo on one GPIO pin via SERVO_FREQUENCY (50 Hz) PWM."""

    def __init__(self, servoPin):
        self._servoPin = servoPin
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(servoPin, GPIO.OUT)
        # software PWM on the chosen pin at the standard servo frequency
        self.pwm = GPIO.PWM(servoPin, SERVO_FREQUENCY)
        self.pwm.start(0)  # Initialization

    def update_angle(self, angle):
        """Move the servo to *angle* degrees, then wait for it to settle."""
        # map 0-180 degrees onto the 2-12 % duty-cycle range
        self.pwm.ChangeDutyCycle(2 + angle / 18)
        time.sleep(2)
# demo loop: sweep the servo through a fixed set of angles until Ctrl-C
try:
    servo = Servo(17)  # servo signal wire on GPIO 17 (BCM numbering)
    while True:
        servo.update_angle(90)
        servo.update_angle(180)
        servo.update_angle(45)
        servo.update_angle(0)
except KeyboardInterrupt:
    # stop the PWM signal and release the GPIO pins on exit
    servo.pwm.stop()
    GPIO.cleanup()
import utilities as utils
import torch
import numpy as np
import scipy.optimize as opt
from collections import defaultdict
import math
import gurobipy as gb
#from mosek import iparam
from cvxopt import matrix, solvers
solvers.options['show_progress'] = False
#solvers.options['mosek'] = {iparam.log: 0,
# iparam.max_num_warnings: 0}
import copy
import time
class GurobiSquire(object):
    """Plain attribute bag that rides alongside a gurobi model.

    Exists because gb.Model won't let arbitrary attributes be set on it.
    """

    def __init__(self):
        pass
################################################################################
# POLYTOPE CLASS #
# #
################################################################################
class Polytope(object):
    """ A polytope {v : Av <= b} (no strict equality constraints).

    Beyond the constraint data, this holds the bookkeeping used when
    enumerating facets of a ReLU-network linear region: the region's
    neuron config, a domain object for cheap feasibility pre-checks,
    known-dead constraints, a lazily-built shared gurobi model, and
    (optionally) linear-map / Lipschitz data for distance objectives.
    """
    ######################################################################
    #                                                                    #
    #                       Polytope Initializations                     #
    #                                                                    #
    ######################################################################

    def __init__(self, ub_A, ub_b, x_np, config=None, interior_point=None,
                 domain=None, dead_constraints=None, gurobi=True,
                 linear_map=None, lipschitz_ub=None, c_vector=None):
        """ Polytopes are of the form Ax <= b
            with no strict equality constraints"""
        # accept torch tensors for A/b and convert to float64 numpy first
        if isinstance(ub_A, torch.Tensor):
            ub_A = ub_A.cpu().detach().numpy().astype(np.double)
        if isinstance(ub_b, torch.Tensor):
            ub_b = ub_b.cpu().detach().numpy().astype(np.double)
        self.ub_A = utils.as_numpy(ub_A).astype(np.double)
        # b is squeezed to 1-D so it lines up with the rows of A
        self.ub_b = utils.as_numpy(ub_b).squeeze().astype(np.double)
        self.config = config
        self.interior_point = interior_point
        self.domain = domain # is a domain object now
        self.dead_constraints = dead_constraints
        self.gurobi = gurobi
        # gurobi model + attribute bag are built lazily by
        # _build_gurobi_model
        self.gurobi_model = None
        self.gurobi_squire = None
        self.x_np = x_np
        self.linear_map = linear_map
        self.lipschitz_ub = lipschitz_ub
        self.lipschitz_constrs = []
        # when c_vector is None this stays None; otherwise every element
        # is converted to numpy
        if c_vector is None:
            self.c_vector = c_vector
        else:
            self.c_vector = [utils.as_numpy(_) for _ in c_vector]

    @classmethod
    def from_polytope_dict(cls, polytope_dict, x_np, domain=None,
                           dead_constraints=None,
                           gurobi=True,
                           lipschitz_ub=None,
                           c_vector=None):
        """ Alternate constructor of Polytope object """
        # polytope_dict supplies the constraints ('poly_a'/'poly_b'), the
        # region's linear map ('total_a'/'total_b') and the neuron configs
        linear_map = {'A': utils.as_numpy(polytope_dict['total_a']),
                      'b': utils.as_numpy(polytope_dict['total_b'])}
        return cls(polytope_dict['poly_a'],
                   polytope_dict['poly_b'],
                   x_np,
                   config=polytope_dict['configs'],
                   domain=domain,
                   dead_constraints=dead_constraints,
                   gurobi=gurobi,
                   linear_map=linear_map,
                   lipschitz_ub=lipschitz_ub,
                   c_vector=c_vector)

    ######################################################################
    #                                                                    #
    #                      FACET GENERATION TECHNIQUES                   #
    #                                                                    #
    ######################################################################

    def generate_facets_naive(self, check_feasible=False):
        """ Generates all (n-1) dimensional facets of polytope

        NOTES: Most naive implementation, and uses no heuristics.
               Doesn't care about domain, is very slow, and not useful for
               verifying neural nets. Useful in Batch implementation though

        RETURNS:
            list of Face objects, one per constraint row (optionally only
            the feasible ones). NOTE(review): x_np is passed as None here.
        """
        num_constraints = self.ub_A.shape[0]
        facets = []
        for i in range(num_constraints):
            facet = Face(self.ub_A, self.ub_b, [i], None)
            if not check_feasible:
                facets.append(facet)
            else:
                if facet._is_feasible():
                    facets.append(facet)
        return facets

    def generate_facets_configs(self, seen_dict, missed_dict=None):
        """ Does Facet checking in parallel using joblib to farm out multiple
            jobs to various processes (possibly on differing processors)

        NOTES:
            Main technique using all the heuristics that don't solve any LPs and
            then farms out the things that need LP's to be done in parallel.

            Uses joblib to farm out checking of feasibility, facet-ness,
            boundedness and then does the novelty check in-serial

        ORDER OF OPERATIONS:
            (1): Removes all possible facets that are tight on
                 'dead constraints'
            (2): Removes all possible facets that don't have a feasible
                 point in the box constraint
            (3): Removes all possible facets that have minimal projection
                 outside the upper bound
            (4): Farms out the remaining facets for feasibility/interior point
                 checks

        RETURNS:
            (facets, reject_dict) where reject_dict counts, per reason,
            how many candidate facets were pruned
        """
        ######################################################################
        #   First set things up                                              #
        ######################################################################
        num_facets = self.ub_A.shape[0]
        potential_facets = [_ for _ in range(num_facets)]
        reject_dict = defaultdict(int)

        # indices of rows that are feasible within the domain's box
        domain_feasible = self.domain.feasible_facets(self.ub_A, self.ub_b)

        ######################################################################
        #   Step 1: Remove facets that are tight on dead constraints         #
        #   Step 2: Remove facets that are infeasible within domain          #
        ######################################################################
        new_potential_facets = []
        for idx in potential_facets:
            if self._is_dead(idx):
                reject_dict['dead_constraints'] += 1
            elif idx not in domain_feasible:
                reject_dict['domain_infeasible'] += 1
            else:
                new_potential_facets.append(idx)
        potential_facets = new_potential_facets

        ######################################################################
        #   Step 3: Remove facets that aren't feasible within upper bound    #
        ######################################################################
        upper_bound_proj = self.domain.minimal_facet_projections(self.ub_A,
                                                                 self.ub_b)
        new_potential_facets = []
        for idx in potential_facets:
            if idx not in upper_bound_proj:
                reject_dict['upper_bound'] += 1
            else:
                new_potential_facets.append(idx)
        potential_facets = new_potential_facets

        #####################################################################
        #   Step 4: Remove all facets that have been seen before            #
        #####################################################################
        # Also remove the infeasible facets
        potential_facets, num_seen, num_missed = \
            self.scrub_seen_idxs(potential_facets, seen_dict,
                                 missed_dict)
        if num_seen > 0:
            reject_dict['seen before'] += num_seen
        if num_missed > 0:
            reject_dict['missed before'] += num_missed

        ####################################################################
        #   Step 5: Set up the Gurobi model for reusable optimization      #
        ####################################################################
        self._build_gurobi_model()

        ######################################################################
        #   Step 6: Construct the facet objects                              #
        ######################################################################
        facets = [self.facet_constructor(idx) for idx in potential_facets]
        return facets, reject_dict

    ##########################################################################
    #                                                                        #
    #                            HELPER METHODS                              #
    #                                                                        #
    ##########################################################################

    def _build_gurobi_model(self, indices_to_include=None):
        """ Builds a gurobi model with all the constraints added (except those
            not listed in indices_to_include)
        ARGS:
            indices_to_include: if not None, is np.array of indices to include
                                if None, includes all indices
        RETURNS:
            None, but modifies self.gurobi_model

        The model's variables 'v' are offsets from self.x_np (bounds and
        constraints below are all expressed relative to x), plus one
        auxiliary nonnegative variable 't' used as an objective handle.
        """
        model = gb.Model()
        dim = self.ub_A.shape[1]

        # add variables
        # variable bounds come from the domain's box, shifted by x
        if self.domain.box_low is not None:
            try:
                lo_minus_x = self.domain.box_low - self.x_np
                lb_i = lambda i: lo_minus_x[i]
            except Exception as err:
                # debugging aid left in place: report the operand types
                # before re-raising
                print("ERROR HERE")
                print(self.domain.box_low.__class__)
                print(self.x_np.__class__)
                raise err
        else:
            lb_i = lambda i: -gb.GRB.INFINITY

        if self.domain.box_high is not None:
            hi_minus_x = self.domain.box_high - self.x_np
            ub_i = lambda i: hi_minus_x[i]
        else:
            ub_i = lambda i: gb.GRB.INFINITY

        # --- variables representing 'v' in LP/QP projections
        v_vars = [model.addVar(lb=lb_i(i), ub=ub_i(i), name='v%s' % i)
                  for i in range(dim)]

        # --- variables representing t for linprogs etc
        aux_vars = []
        aux_vars.append(model.addVar(lb=0, ub=gb.GRB.INFINITY, name='t'))
        model.update()

        # add constraints
        # --- constraints for being in the polytope
        # A(x + v) <= b  rewritten as  Av <= b - Ax
        Ax = self.ub_A.dot(self.x_np)
        ub_b_minus_Ax = self.ub_b - Ax
        if indices_to_include is not None:
            a_rows = self.ub_A[indices_to_include, :]
            b_rows = ub_b_minus_Ax[indices_to_include]
        else:
            a_rows = self.ub_A
            b_rows = ub_b_minus_Ax
        m = a_rows.shape[0]
        _ = model.addConstrs(gb.LinExpr(a_rows[i], v_vars) <= b_rows[i]
                             for i in range(m))

        # --- constraints for being in the domain
        # if self.domain.box_low is not None:
        #     x_minus_lo = self.x_np - self.domain.box_low
        #     _ = model.addConstrs(v_vars[i] <= x_minus_lo[i] for i in range(dim))
        # if self.domain.box_high is not None:
        #     hi_minus_x = self.domain.box_high - self.x_np
        #     _ = model.addConstrs(v_vars[i] <= hi_minus_x[i] for i in range(dim))
        model.update()
        # silence gurobi's per-solve logging
        model.setParam('OutputFlag', False)
        self.gurobi_model = model
        self.gurobi_squire = GurobiSquire()

    def _is_feasible(self):
        """ Runs a gurobi check to see if this is a feasible model

        NOTE(review): returns the raw gurobi Status code (an int, where 2
        means OPTIMAL), not a bool — confirm callers treat it accordingly.
        """
        if self.gurobi_model is None:
            self._build_gurobi_model()
        # feasibility check: optimize a constant-zero objective
        self.gurobi_model.setObjective(0)
        self.gurobi_model.update()
        self.gurobi_model.optimize()
        return self.gurobi_model.Status

    def _is_dead(self, i):
        """ Just a quick check for deadness of a constraint. We don't need
            to build faces for neurons that we know to be fixed to be on or off
        ARGS:
            i : int - constraint index
        RETURNS:
            False if no known dead constraints or we're not sure about this
            constraint. True o.w.
        """
        return (self.dead_constraints is not None and
                self.dead_constraints[i])

    def facet_constructor(self, tight_idx, facet_type='facet',
                          extra_tightness=None):
        """ Builds a Face for this polytope with constraint ``tight_idx``
            held tight, sharing this polytope's gurobi model/squire and
            Lipschitz data.
        """
        return Face(self.ub_A, self.ub_b, [tight_idx], config=self.config,
                    domain=self.domain, facet_type=facet_type, x_np=self.x_np,
                    extra_tightness=extra_tightness,
                    gurobi_model=self.gurobi_model,
                    gurobi_squire=self.gurobi_squire,
                    linear_map=self.linear_map,
                    lipschitz_ub=self.lipschitz_ub,
                    c_vector=self.c_vector)

    def scrub_seen_idxs(self, idx_list, seen_dict, missed_dict=None):
        """ Removes facets we've seen before, where idx_list is which idx is
            tight. Also removes the cache-miss polytopes

        For each candidate index, the neuron config bit it would flip is
        toggled and the resulting flattened config is looked up in
        seen_dict / missed_dict.
        RETURNS:
            (surviving idxs, #seen before, #missed before)
        """
        if missed_dict is None:
            missed_dict = {}
        output_idxs, num_seen_before, num_missed_before = [], 0, 0
        for idx in idx_list:
            # flip the single config bit corresponding to this constraint
            flip_i, flip_j = utils.index_to_config_coord(self.config, idx)
            new_configs = copy.deepcopy(self.config)
            new_configs[flip_i][flip_j] = int(1 - new_configs[flip_i][flip_j])
            new_flat = utils.flatten_config(new_configs)

            if new_flat in seen_dict:
                num_seen_before += 1
            elif new_flat in missed_dict:
                num_missed_before += 1
            else:
                output_idxs.append(idx)
        return output_idxs, num_seen_before, num_missed_before
##############################################################################
# #
# FACE CLASS #
# #
##############################################################################
class Face(Polytope):
def __init__(self, poly_a, poly_b, tight_list, x_np, config=None,
domain=None, dead_constraints=None, removal_list=None,
facet_type=None, gurobi_model=None, gurobi_squire=None,
extra_tightness=None,
linear_map=None, lipschitz_ub=None, c_vector=None):
super(Face, self).__init__(poly_a, poly_b, x_np, config=config,
domain=domain,
dead_constraints=dead_constraints)
if tight_list[0] is None:
assert extra_tightness is not None
self.a_eq = extra_tightness['A'].reshape((1, -1))
self.b_eq = extra_tightness['b']
else:
self.a_eq = self.ub_A[tight_list]
self.b_eq = self.ub_b[tight_list]
self.tight_list = tight_list
self.is_feasible = None
self.is_facet = None
self.interior = None
self.removal_list = removal_list
assert facet_type in [None, 'decision', 'facet']
self.facet_type = facet_type
self.gurobi_model = gurobi_model
self.gurobi_squire = gurobi_squire
self.extra_tightness = extra_tightness
self.linear_map = linear_map
self.lipschitz_ub = lipschitz_ub
self.c_vector = c_vector
def get_new_configs(self):
''' Function takes original ReLu configs and flips the activation of
the ReLu at index specified in 'tight_boolean_configs'.
'''
# New and improved version:
# Looks at the tight list, maps the tight index to the 2d
# coordinate in the config and flips the index_map
#assert self.interior is not None
orig_configs = self.config
tight_idx = self.tight_list[0]
flip_i, flip_j = utils.index_to_config_coord(orig_configs, tight_idx)
new_configs = copy.deepcopy(orig_configs)
new_configs[flip_i][flip_j] = int(1 - new_configs[flip_i][flip_j])
return new_configs
def fast_domain_check(self):
""" Does the fast checks to see if we can reject this facet based on
the domain.
Returns:
True if we cannot reject this without checking an LP/QP
False if we can for sure reject this
"""
domain = self.domain
if self.tight_list[0] is None: # adversarial constraint here
dec_A = self.extra_tightness['A']
dec_b = self.extra_tightness['b']
A = np.vstack((self.ub_A, dec_A.reshape(1, -1)))
b = np.hstack((self.ub_b, dec_b))
checklist = [A.shape[0] - 1]
else: # regular facet here
A = self.ub_A
b = self.ub_b
checklist = self.tight_list
domain_feasible = domain.feasible_facets(A, b, checklist)
if len(domain_feasible) == 0:
return False
# Do checks to see if this hyperplane has projection inside ball
projection = domain.minimal_facet_projections(A, b, checklist)
return len(projection) > 0
##########################################################################
# #
# GUROBI DISTANCE FUNCTIONS #
# #
##########################################################################
def linf_dist_gurobi(self, x):
""" Computes the l_infinity distance to point x using gurobi
"""
v_vars = [v for v in self.gurobi_model.getVars()
if v.VarName.startswith('v')]
# First do a hacky check to see if this model has already had LP setup
if not hasattr(self.gurobi_squire, 'linf_dist_setup'):
self.gurobi_squire.linf_dist_setup = True
# If hasn't been set up yet, set up the general LP constraints
# --- add -t<= v_i <= t
t = self.gurobi_model.getVarByName('t')
for v_var in v_vars:
self.gurobi_model.addConstr(-t <= v_var)
self.gurobi_model.addConstr(v_var <= t)
# --- add t <= upper_bound
self.gurobi_model.addConstr(t <= self.domain.linf_radius)
# --- add objective
# --- --- if self.lipschitz_ub is not None, we incorporate this
# objective as follows
#
if self.lipschitz_ub is None:
self.gurobi_model.setObjective(t, gb.GRB.MINIMIZE)
else:
"""
If self.lipschitz_ub is not None, then we incorporate this
objective as follows
Recall our setting is
min_{y in F} ||y -x|| + z
s.t. z >= |c_j^T(f(y) - f(DB))| / L_j
for all j != true label
(and f(DB)=0 and c_j^Tf(y) >= 0 and linear)
Then we need to compute g_j(y) := c_j^Tf(y) / L_j
for each j (as a linear functional)
But the minimization works like (letting y = x + v)
min_{x+v in F} ||v||_infty + z
s.t. z >= c_j^Tf(x+v) / L_j
and c_j^Tf(x+v) = a_j^T(x +v) + b_j = a_j^Tv + (b_j + a_j^Tx)
so z >= a_j^Tv + (b_j + a_j^Tx)
and if f(y) = Ay + b
where a_j := c_j^TA/L_j and b_j = c_j^Tb/L_j
"""
# First step is to compute the a_j/b_j for each c vector
lin_A = self.linear_map['A']
lin_b = self.linear_map['b']
a_js, b_js = [], []
for lip_val, c_vec in zip(self.lipschitz_ub, self.c_vector):
a_js.append(c_vec.dot(lin_A) / lip_val)
b_js.append(c_vec.dot(lin_b) / lip_val)
# Then we can add the constraint of z to everything
# (lip_var >= a_j^T v + (b_j + a_j^Tx))
lip_var = self.gurobi_model.addVar(lb=0, name='lip_var')
lipschitz_constrs = []
for j in range(len(a_js)):
a_j, b_j = a_js[j], b_js[j]
linexpr_j = gb.LinExpr(a_j, v_vars)
const_j = b_j + a_j.dot(self.x_np)
lip_constr = (lip_var >= linexpr_j + const_j)
lipschitz_constrs.append(lip_constr)
self.gurobi_squire.lipschitz_constrs = lipschitz_constrs
self.gurobi_model.update()
# Now we can remove any equality constraints already set
try:
self.gurobi_model.remove(self.gurobi_model.getConstrByName('facet'))
self.gurobi_model.update()
except gb.GurobiError:
pass
# Add the new equality constraint
if self.facet_type == 'facet':
tight_row = self.ub_A[self.tight_list[0], :]
tight_b = self.ub_b[self.tight_list[0]] - tight_row.dot(self.x_np)
elif self.facet_type == 'decision':
tight_row = self.extra_tightness['A']
tight_b = self.extra_tightness['b'] - tight_row.dot(self.x_np)
self.gurobi_model.addConstr(gb.LinExpr(tight_row, v_vars) == tight_b,
name='facet')
# Now branch to handle the lipschitz cases...
t_var = self.gurobi_model.getVarByName('t')
if self.lipschitz_ub is None:
self.gurobi_model.setObjective(t_var, gb.GRB.MINIMIZE)
self.gurobi_model.update()
self.gurobi_model.optimize()
if self.gurobi_model.Status != 2:
return None, None
else:
obj_value = self.gurobi_model.getObjective().getValue()
opt_point = self.x_np + np.array([v.X for v in v_vars])
return obj_value, opt_point
lip_var = self.gurobi_model.getVarByName('lip_var')
self.gurobi_model.setObjective(t_var + lip_var, gb.GRB.MINIMIZE)
objs_opts = []
opt_times = []
for lip_constr in self.gurobi_squire.lipschitz_constrs:
try:
self.gurobi_model.remove(self.gurobi_model.getConstrByName('lipschitz'))
except gb.GurobiError:
pass
start_time = time.time()
self.gurobi_model.addConstr(lip_constr, name='lipschitz')
self.gurobi_model.update()
self.gurobi_model.optimize()
opt_times.append('%.04f' % (time.time() - start_time))
if self.gurobi_model.Status == 3:
return None, None
objs_opts.append((self.gurobi_model.getObjective().getValue(),
np.array([v.X for v in v_vars])))
# print("OPT TIMES: ", ' '.join(opt_times))
min_pair = min(objs_opts, key=lambda pair: pair[0])
return min_pair[0], min_pair[1] + self.x_np
def l2_dist_gurobi(self, x):
    """ Returns the l_2 distance to point x, and projection using Gurobi

    Minimizes t subject to t^2 >= ||v||^2 (QCP), the polytope facet
    equality, and (optionally) a Lipschitz penalty term.  Returns
    (objective value, optimal point) or (None, None) on solver failure.
    """
    # All decision variables named 'v*' represent the displacement from x.
    v_vars = [v for v in self.gurobi_model.getVars()
              if v.VarName.startswith('v')]
    t_var = self.gurobi_model.getVarByName('t')
    ######################################################################
    #   Do the setups if necessary                                       #
    ######################################################################
    # One-time setup, flagged on the squire so repeated calls reuse it.
    if not hasattr(self.gurobi_squire, 'l2_dist_setup'):
        self.gurobi_squire.l2_dist_setup = True
        # If hasn't been setup yet, setup the objective:
        # t bounds the l2 norm of v via the quadratic constraint below.
        l2_obj = gb.quicksum(v * v for v in v_vars)
        self.gurobi_model.addConstr(t_var * t_var >= l2_obj)
        if self.lipschitz_ub is None:
            self.gurobi_model.setObjective(t_var, gb.GRB.MINIMIZE)
        else:
            # First step is to compute the a_j/b_j for each c vector
            lin_A = self.linear_map['A']
            lin_b = self.linear_map['b']
            a_js, b_js = [], []
            for lip_val, c_vec in zip(self.lipschitz_ub, self.c_vector):
                a_js.append(c_vec.dot(lin_A) / lip_val)
                b_js.append(c_vec.dot(lin_b) / lip_val)
            # Then we can add the constraint of z to everything
            # (lip_var >= a_j^T v + (b_j + a_j^T x))
            lip_var = self.gurobi_model.addVar(lb=0, name='lip_var')
            lipschitz_constrs = []
            for j in range(len(a_js)):
                a_j, b_j = a_js[j], b_js[j]
                linexpr_j = gb.LinExpr(a_j, v_vars)
                const_j = b_j + a_j.dot(self.x_np)
                lip_constr = (lip_var >= linexpr_j + const_j)
                lipschitz_constrs.append(lip_constr)
            # Constraints are stored unadded; they are swapped in one at a
            # time in the loop at the bottom of this method.
            self.gurobi_squire.lipschitz_constrs = lipschitz_constrs
        self.gurobi_model.update()
    ######################################################################
    #   Swap out the tight facet if necessary                            #
    ######################################################################
    # Remove any previously-installed 'facet' equality (GurobiError is
    # raised when none exists yet).
    try:
        self.gurobi_model.remove(self.gurobi_model.getConstrByName('facet'))
    except gb.GurobiError:
        pass
    self.gurobi_model.update()
    # --- add facet constraint
    if self.facet_type == 'facet':
        tight_row = self.ub_A[self.tight_list[0], :]
        tight_b = self.ub_b[self.tight_list[0]] - tight_row.dot(self.x_np)
    else:
        # 'decision' facets carry their own (A, b) in extra_tightness.
        tight_row = self.extra_tightness['A']
        tight_b = self.extra_tightness['b'] - tight_row.dot(self.x_np)
    self.gurobi_model.addConstr(gb.LinExpr(tight_row, v_vars) == tight_b,
                                name='facet')
    ######################################################################
    #   Now branch for the lipschitz cases                               #
    ######################################################################
    if self.lipschitz_ub is None:
        self.gurobi_model.setObjective(t_var, gb.GRB.MINIMIZE)
        self.gurobi_model.update()
        self.gurobi_model.optimize()
        # Status 2 == gb.GRB.OPTIMAL
        if self.gurobi_model.Status != 2:
            return None, None
        else:
            obj_value = self.gurobi_model.getObjective().getValue()
            opt_point = self.x_np + np.array([v.X for v in v_vars])
            return obj_value, opt_point
    lip_var = self.gurobi_model.getVarByName('lip_var')
    #print("LIPSCHITZ OBJ")
    self.gurobi_model.setObjective(t_var + lip_var, gb.GRB.MINIMIZE)
    objs_opts = []
    opt_times = []
    # Solve once per stored lipschitz constraint, keeping the best result.
    for lip_constr in self.gurobi_squire.lipschitz_constrs:
        try:
            self.gurobi_model.remove(self.gurobi_model.getConstrByName('lipschitz'))
        except gb.GurobiError:
            pass
        start_time = time.time()
        self.gurobi_model.addConstr(lip_constr, name='lipschitz')
        self.gurobi_model.update()
        self.gurobi_model.optimize()
        opt_times.append('%.04f' % (time.time() - start_time))
        # Status 3 == INFEASIBLE, 4 == INF_OR_UNBD: whole query fails.
        if self.gurobi_model.Status in [3, 4]:
            return None, None
        if self.gurobi_model.Status == 2:
            objs_opts.append((self.gurobi_model.getObjective().getValue(),
                              np.array([v.X for v in v_vars])))
        else:
            # Status 12 == NUMERIC: retry with the plain t objective.
            # NOTE(review): the retry's result is not recorded in objs_opts
            # -- presumably only used for diagnostics; confirm intent.
            if self.gurobi_model.Status == 12:
                self.gurobi_model.setObjective(t_var, gb.GRB.MINIMIZE)
                self.gurobi_model.update()
                self.gurobi_model.optimize()
                #print("GUROBI FAILED ONCE BUT NOW...")
                #print(self.gurobi_model.Status)
            #print("GUROBI WHAT???", self.gurobi_model.Status)
    #print("OPT TIMES: ", ' '.join(opt_times))
    # If no sub-solve produced a candidate, min() raises -> treat as failure.
    try:
        min_pair = min(objs_opts, key=lambda pair: pair[0])
    except:
        return None, None
    return min_pair[0], min_pair[1] + self.x_np
##########################################################################
# #
# MOSEK DISTANCE FUNCTIONS #
# #
##########################################################################
def linf_dist(self, x):
    """ Computes the l_infinity distance to point x using LP
    The linear program is as follows

    min_{t, v} t
    such that
        1) A(x + v) <= b        (<==>)  Av <= b - Ax
        2) -t <= v_i <= t       (<==>)  v_i - t <= 0 AND -v_i - t <= 0
        3) (x + v) in Domain
        5) t <= upper_bound
        4) A_eq(x + v) = b_eq   (<==>)
    so if A has shape (m,n) and domain constraints have shape (d, n)
        - (n + 1) variables
        - (m + 2n + d) inequality constraints
        - 1 equality constraint

    Returns (primal objective, optimal point) on success, (None, None)
    when infeasible/unknown, and raises on any other solver status.
    """
    ######################################################################
    #   Setup things needed for linprog                                  #
    ######################################################################
    m, n = self.ub_A.shape
    zero_m_col = np.zeros((m, 1))
    # NOTE(review): zero_n_col appears unused below.
    zero_n_col = np.zeros((n, 1))
    x_row = utils.as_numpy(x).squeeze()
    x_col = x_row.reshape(n, 1)
    ######################################################################
    #   Build constraints row by row                                     #
    ######################################################################
    # VARIABLES ARE (v, t)
    a_constraints = []
    b_constraints = []
    # Constraint 1 has shape (m, n+1)
    constraint_1a = np.hstack((self.ub_A, zero_m_col))
    constraint_1b = (self.ub_b - self.ub_A.dot(x_col).squeeze()).reshape(-1)
    assert constraint_1a.shape == (m, n + 1)
    assert constraint_1b.shape == (m,)
    a_constraints.append(constraint_1a)
    b_constraints.append(constraint_1b)
    # Constraint 2 has shape (2n, n+1)
    constraint_2a_left = np.vstack((np.eye(n), -1 * np.eye(n)))
    constraint_2a = np.hstack((constraint_2a_left,
                               -1 * np.ones((2 * n, 1))))
    constraint_2b = np.zeros(2 * n)
    assert constraint_2a.shape == (2 * n, n + 1)
    assert constraint_2b.shape == (2 * n,)
    a_constraints.append(constraint_2a)
    b_constraints.append(constraint_2b)
    # Constraint 3 is added by the domain
    # If a full box, should have shape (2n, n + 1)
    d_a, d_b = self.domain.original_box_constraints()
    x_dx_low = x_row[self.domain.unmodified_bounds_low]
    x_dx_high = x_row[self.domain.unmodified_bounds_high]
    if d_a is not None:
        d_a_rows = d_a.shape[0]
        constraint_d_a = np.hstack((d_a, np.zeros((d_a_rows, 1))))
        constraint_d_b = d_b + np.hstack((x_dx_low, -x_dx_high))
        assert constraint_d_a.shape == (d_a_rows, n + 1)
        assert constraint_d_b.shape == (d_a_rows,)
        a_constraints.append(constraint_d_a)
        b_constraints.append(constraint_d_b)
    # Constraint 4 is upper bound constraint
    if self.domain.linf_radius is not None:
        constraint_4a = np.zeros((1, n + 1))
        constraint_4a[0][-1] = 1
        # NOTE(review): 'constaint_4b' is a typo for constraint_4b; used
        # consistently so it is harmless, but worth renaming later.
        constaint_4b = np.array(self.domain.linf_radius)
        a_constraints.append(constraint_4a)
        b_constraints.append(constaint_4b)
    # Constraint 5 is equality constraint, should have (1, n+1)
    a_eq = matrix(np.hstack((self.a_eq, np.zeros((1,1)))))
    b_eq = matrix((self.b_eq - self.a_eq.dot(x_row)).astype(np.double))
    # Objective should have length (n + 1): minimize t only.
    c = matrix(np.zeros(n + 1))
    c[-1] = 1
    ub_a = matrix(np.vstack(a_constraints))
    ub_b = matrix(np.hstack(b_constraints))
    start = time.time()
    cvxopt_out = solvers.lp(c, ub_a, ub_b, A=a_eq, b=b_eq, solver='mosek')
    end = time.time()
    # print("LP SOLVED IN %.03f" % (end -start))
    if cvxopt_out['status'] == 'optimal':
        # Optimal point is x plus the v-part of the solution (drop t).
        return cvxopt_out['primal objective'], \
               (x_row + np.array(cvxopt_out['x'])[:-1].squeeze())
    elif cvxopt_out['status'] in ['primal infeasible', 'unknown']:
        return None, None
    else:
        print("About to fail...")
        print("CVXOPT status", cvxopt_out['status'])
        raise Exception("LINF DIST FAILED?")
def l2_dist(self, x):
    """ Returns the l_2 distance to point x using a QP,
    as well as the corresponding projection point.

    Solves the quadratic program
        min_{v} v^T v                 (<==>) v^T I v
        s.t.
        1) A(x + v) <= b              (<==>) Av <= b - Ax
           (plus the domain's box constraints, when present)
        2) A_eq(x + v) = b_eq         (<==>) A_eq v = b_eq - A_eq x

    Args:
        x: query point (anything `utils.as_numpy` accepts).

    Returns:
        (l2 distance, projected point) on success, (None, None) when the
        QP is infeasible or the solver reports a non-optimal status.
    """
    n = self.ub_A.shape[1]
    x_row = utils.as_numpy(x).squeeze()
    # Objective: minimize v^T I v.
    P = matrix(np.identity(n))
    q = matrix(np.zeros([n, 1]))
    # Inequality constraints: polytope rows, then optional domain box rows.
    ineq_mats = [self.ub_A]
    ineq_vecs = [self.ub_b - self.ub_A.dot(x_row)]
    d_a, d_b = self.domain.original_box_constraints()
    if d_a is not None:
        # BUGFIX: constraint_d_a / constraint_d_b were previously referenced
        # unconditionally when building G and h, raising NameError whenever
        # the domain had no box constraints (d_a is None).
        x_dx_low = x_row[self.domain.unmodified_bounds_low]
        x_dx_high = x_row[self.domain.unmodified_bounds_high]
        d_a_rows = d_a.shape[0]
        constraint_d_b = d_b + np.hstack((x_dx_low, -x_dx_high))
        assert d_a.shape == (d_a_rows, n)
        assert constraint_d_b.shape == (d_a_rows,)
        ineq_mats.append(d_a)
        ineq_vecs.append(constraint_d_b)
    G = matrix(np.vstack(ineq_mats))
    h = matrix(np.hstack(ineq_vecs))
    # Equality constraints.
    A = matrix(self.a_eq.astype(np.double))
    b = matrix((self.b_eq - self.a_eq.dot(x_row)).astype(np.double))
    quad_program_result = solvers.qp(P, q, G, h, A, b, solver='mosek')
    if quad_program_result['status'] == 'optimal':
        v = np.array(quad_program_result['x'])
        return np.linalg.norm(v), x_row + v.squeeze()
    # Infeasible / unknown / failed statuses all signal "no projection".
    # (The old unreachable `raise` after the return has been removed.)
    return None, None
|
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_highest_rel_level
def run():
    """Requirements for Task 2C"""
    # Fetch the station list and refresh the latest water levels.
    station_list = build_station_list()
    update_water_levels(station_list)
    # Print the N stations with the highest relative water level.
    top_n = 10
    for station in stations_highest_rel_level(station_list, top_n):
        print(station.name, station.relative_water_level())
# Script entry point: print the Task 2C banner and run the demonstration.
if __name__ == "__main__":
    print("*** Task 2C most at risk stations *** \n")
    run()
from rest_framework import serializers
from .models import Mechanic, ShopInventory, PairMechanic
class ShopInventorySerializer(serializers.ModelSerializer):
    """Serializes ShopInventory rows (part and target-car details)."""
    class Meta:
        model = ShopInventory
        fields = ('part_name', 'part_price', 'car_make', 'car_model')
class PairMechanicSerializer(serializers.ModelSerializer):
    """Read serializer for PairMechanic: flattens the related mechanic's
    name and the offered service into top-level string fields."""
    mechanic_name = serializers.CharField(source='mechanic.name')
    # NOTE(review): 'service_Offered' mirrors the model field's unusual
    # casing -- confirm against the Service model before renaming.
    service = serializers.CharField(source='service.service_Offered')
    class Meta:
        model = PairMechanic
        fields = ('mechanic_name', 'service')
class AddMechanicSerializer(serializers.ModelSerializer):
    """Write serializer for PairMechanic: accepts raw FK ids."""
    class Meta:
        model = PairMechanic
        fields = ('mechanic', 'service')
import urllib.request
import urllib.parse
import json
import requests
from sqlitedict import SqliteDict
import util
import os
db_path = 'leetcode.db'
user_agent = r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
def withUrl(u):
    """Prefix *u* with the leetcode-cn.com base URL."""
    base = "https://leetcode-cn.com/"
    return base + u
def leetcode_key(id):
    """Return the SqliteDict key used to store problem *id*."""
    return "leetcode_{}".format(id)
def is_int(s):
    """Return True if *s* can be converted to an int, else False.

    Also catches TypeError so non-numeric inputs such as None are
    reported as "not an int" instead of raising (the previous version
    only caught ValueError).
    """
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False
class Leetcode:
    """Local cache of leetcode-cn.com problem metadata.

    Problems are fetched from the site's GraphQL API on demand and stored
    as JSON strings in a SqliteDict keyed by ``leetcode_<id>``.  Solution
    files under the user's ``user/leetcode`` directory are scanned to
    track which problems are finished.
    """
    def __init__(self):
        self.dict = self.init_db()
        # Solved problems: files named like "<id>.<...>.cpp" / ".java".
        self.finished = []
        # Markdown note files -- "flasks" is the author's term; meaning
        # presumably "flash cards" or notes, TODO confirm.
        self.flasks = []
        # read user
        p = util.get_root("user", "leetcode")
        entries = os.listdir(p)
        for k in entries:
            if k.endswith(".cpp") or k.endswith(".java"):
                self.finished.append(k)
            elif k.endswith(".md"):
                self.flasks.append(k)
    def init_db(self):
        """Open (and autocommit to) the problem cache database."""
        d = SqliteDict(util.get_db('leetcode.sqlite'), autocommit=True)
        return d
    def close_db(self):
        """Close the underlying SqliteDict."""
        self.dict.close()
    def get_tag_problems(self, tag):
        """Return cached problem JSON dicts tagged *tag* (free problems only)."""
        problems = self.get_all_problems()
        datas = []
        for k in problems:
            try:
                j = json.loads(problems[k])
                tags = j['data']['question']['topicTags']
                paid_only = j['data']['question']['paid_only']
                if len(tags) > 0:
                    for t in tags:
                        if t['slug'] == tag and paid_only == False:
                            datas.append(j)
                            break
            except Exception as e:
                # Malformed / legacy cache entries are skipped with a log line.
                print("unknow key:", k, e)
                pass
        return datas
    def get_all_problems(self):
        """Return all cached problem entries as {key: raw JSON string}.

        Keys look like 'leetcode_<id>'; the digit check at index 9 (the
        character right after the prefix) filters out bookkeeping keys
        such as 'leetcode_update_db_time'.
        """
        d = {}
        for k, v in self.dict.iteritems():
            if k.startswith("leetcode_") and k[9].isdigit():
                d[k] = v
        return d
    def save_problem(self, id, content):
        """Store the raw JSON *content* for problem *id*."""
        self.dict[leetcode_key(id)] = content
        self.dict.commit()
    def get_problem_content(self, id):
        """Return the cached raw JSON for *id*, or None if absent."""
        v = self.dict.get(leetcode_key(id))
        return v
    def get_level(self, id):
        """Return the difficulty string for *id* (falls back to str(id))."""
        content = self.get_problem_content(id)
        if content == None:
            print("title not exist:", id)
            return str(id)
        j = json.loads(content)
        return j['data']['question']['difficulty']
    def check_finish(self, id):
        """Return True if a solution file '<id>.*' exists."""
        for k in self.finished:
            if k.startswith(id+"."):
                return True
        return False
    def check_flask(self, id):
        """Return the note filename for *id*, or '' if none exists."""
        for k in self.flasks:
            if k.startswith(id+"."):
                return k
        return ""
    def get_problem(self, id):
        """Return the parsed problem dict for *id* (falls back to str(id))."""
        content = self.get_problem_content(id)
        if content == None:
            print("title not exist:", id)
            return str(id)
        j = json.loads(content)
        return j
    def get_title(self, id):
        """Return the translated title for *id* (falls back to str(id))."""
        content = self.get_problem_content(id)
        if content == None:
            print("title not exist:", id)
            return str(id)
        j = json.loads(content)
        return j['data']['question']['translatedTitle']
    def get_title_with_slug(self, id, slug, paid_only):
        """Return the translated title for *id*, fetching and caching the
        problem detail from the GraphQL API on a cache miss."""
        content = self.get_problem_content(id)
        if content:
            j = json.loads(content)
            return j['data']['question']['translatedTitle']
        session = requests.Session()
        headers = {'User-Agent': user_agent, 'Connection':
                   'keep-alive', 'Content-Type': 'application/json',
                   'Referer': withUrl('problems/') + slug}
        url = withUrl('graphql')
        params = {'operationName': "getQuestionDetail",
                  'variables': {'titleSlug': slug},
                  'query': '''query getQuestionDetail($titleSlug: String!) {
                    question(titleSlug: $titleSlug) {
                      questionId
                      questionFrontendId
                      questionTitle
                      questionTitleSlug
                      translatedTitle
                      translatedContent
                      content
                      difficulty
                      stats
                      similarQuestions
                      categoryTitle
                      topicTags {
                        name
                        slug
                      }
                    }
                  }'''}
        json_data = json.dumps(params).encode('utf8')
        resp = session.post(url, data=json_data, headers=headers, timeout=10)
        content = resp.text
        j = json.loads(content)
        # paid_only is not part of the GraphQL payload; stash it alongside.
        j['data']['question']['paid_only'] = paid_only
        self.save_problem(id, json.dumps(j))
        return j['data']['question']['translatedTitle']
    def get_update_db_time(self):
        """Return the last full-refresh timestamp (0 if never refreshed)."""
        t = self.dict.get("leetcode_update_db_time")
        if t == None:
            return 0
        return t
    def save_update_db_time(self):
        """Record 'now' as the last full-refresh timestamp."""
        self.dict["leetcode_update_db_time"] = util.now()
    def update_db(self):
        """Refresh the cache from /api/problems/all, at most once per 24h.

        Timestamps are in milliseconds (hence 24*3600*1000).
        """
        t = self.get_update_db_time()
        if util.now()-t < 24*3600*1000:
            return
        url = withUrl("api/problems/all/")
        f = urllib.request.urlopen(url)
        content = f.read().decode('utf-8')
        qlist = json.loads(content)
        try:
            for q in qlist['stat_status_pairs']:
                id = q['stat']['question_id']
                front_id = q['stat']['frontend_question_id']
                # Prefer the user-visible frontend id when it is numeric.
                if is_int(front_id):
                    id = int(front_id)
                level = q['difficulty']['level']
                slug = q['stat']['question__title_slug']
                paid_only = q['paid_only']
                title = self.get_title_with_slug(id, slug, paid_only)
                print("id:", id, level, title)
            self.save_update_db_time()
        except Exception as e:
            print("leetcode update db error:", e)
            pass
|
from js9 import j
app = j.tools.prefab._getBaseAppClass()
class PrefabSynapsebot(app):
    """Prefab app that builds, configures and runs the Matrix-NEB bot
    against a Synapse (Matrix) homeserver."""
    NAME = "synapse-bot"
    def _init(self):
        # Upstream bot repository and the local checkout location.
        self.bot_repo = "https://github.com/arahmanhamdy/Matrix-NEB.git"
        self.server_path = "{{CODEDIR}}/matrixbot"
    def build(self, reset=False):
        """Install OS/python prerequisites and the bot from source.

        :param reset: rebuild even if a previous build was recorded
        """
        if self.doneCheck('build', reset):
            return
        # Install prerequisite libraries
        self.prefab.system.package.mdupdate()
        needed_packages = ["python3-pip", "python3-setuptools"]
        for package in needed_packages:
            self.prefab.system.package.ensure(package)
        # Clone bot server repo
        self.prefab.tools.git.pullRepo(self.bot_repo, dest=self.server_path)
        # Install prerequisite python libs
        cmd = """
        cd {server_path}
        python3 setup.py install
        """.format(server_path=self.server_path)
        self.prefab.core.run(cmd)
        self.doneSet('build')
    def install(self, matrix_url, bot_user, admins=None, start=True, reset=False):
        """
        Build and Install synapse matrix bot server
        :param matrix_url: Synapse matrix url
        :param bot_user: the full username of bot user (i.e @gigbot:matrix.aydo.com)
        :param admins:list: list of full username of admins of the bot (i.e ["@root:matrix.aydo.com"])
        :param start: start after install
        :param reset: reset building
        """
        self.build(reset=reset)
        # Configure synapse bot server
        self._configure(matrix_url, bot_user, admins)
        if start:
            self.start()
    def _configure(self, matrix_url, bot_user, admins=None):
        """Register the bot user on the homeserver and write botserver.conf."""
        import requests
        if not admins:
            admins = []
        # create bot user
        # NOTE(review): the bot is registered with an empty password and a
        # dummy auth flow -- confirm the homeserver allows open registration.
        bot = {"username": bot_user, "password": "", "auth": {"type": "m.login.dummy"}}
        res = requests.post("{}/_matrix/client/r0/register".format(matrix_url), json=bot)
        token = res.json()['access_token']
        # configure bot server to use the bot user
        config_file_path = "{}/botserver.conf".format(self.server_path)
        config_data = {
            "url": matrix_url,
            "case_insensitive": True,
            "token": token,
            "admins": admins,
            "user": bot_user
        }
        config_data = j.data.serializer.json.dumps(config_data)
        self.prefab.core.file_write(config_file_path, config_data)
    def start(self):
        """Launch neb.py under the process manager, waiting for readiness."""
        cmd = 'python3 "{}" -c "{}"'.format(self.server_path + "/neb.py", self.server_path + "/botserver.conf")
        self.prefab.system.processmanager.get().ensure("matrix-bot", cmd, wait=5, expect="Running on")
    def stop(self):
        """Stop the managed matrix-bot process."""
        self.prefab.system.processmanager.get().stop("matrix-bot")
|
from mongoengine import connect
from models import ImmutableProductData, Materials, PieceOfClothing
connect('cf-id-example', host='mongodb://localhost', alias='default')
def init_db():
    """Seed the database with one example PieceOfClothing document,
    persisting its referenced sub-documents first."""
    product_data = ImmutableProductData(sku="BBBBB", brand="ChangedClothing")
    clothing_materials = Materials(material_type='LyoCell', name='LyoTex')
    test_piece_of_clothing = PieceOfClothing(
        immutable_product_data=product_data,
        materials=clothing_materials)
    # Referenced documents must be saved before the referencing document.
    test_piece_of_clothing.immutable_product_data.save()
    test_piece_of_clothing.materials.save()
    test_piece_of_clothing.save()
|
from .client import RSSFeed |
from tkinter import *
from infrastructure import DemoWindow, demo_path
import tkinter.messagebox as messagebox
SCROLLAREA_HEIGHT=1024
SCROLLAREA_WIDTH=1280
ROW_NUM=10
COLUMN_NUM=16
ROW_PAD=30
COLUMN_PAD=30
BOX_HEIGHT=(SCROLLAREA_HEIGHT/COLUMN_NUM) - COLUMN_PAD
BOX_WIDTH =(SCROLLAREA_WIDTH/ROW_NUM) - ROW_PAD
CURRENTBOX_COLOR = 'cyan'
BOX_COLOR = 'lightgray'
class ScrollableCanvas( Frame ):
    """A canvas with scroll bars. A number of
    box is drawn inside the canvas to fill it.
    """
    def __init__(self, master):
        Frame.__init__(self,master)
        # create and pack the canvas and the two scrollbars
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0,weight=1)
        self.canvas = Canvas(self, relief=SUNKEN, border=2,
                             scrollregion=(0,0,1280,1024) )
        self.vbar=Scrollbar(self, command=self.canvas.yview)
        self.hbar=Scrollbar(self, orient='horizontal',
                            command=self.canvas.xview )
        self.canvas.configure({'xscrollcommand':self.hbar.set,
                               'yscrollcommand':self.vbar.set} )
        self.canvas.grid(row=0,column=0, sticky='nsew' )
        self.vbar.grid(row=0,column=1,sticky='ns' )
        self.hbar.grid(row=1,column=0,sticky='ew' )
        self.fill_canvas()
    def fill_canvas(self):
        """Draw a ROW_NUM x COLUMN_NUM grid of labelled boxes and bind
        the mouse handlers."""
        for r in range(0,ROW_NUM):
            x = ROW_PAD+(r*(BOX_WIDTH+ROW_PAD))
            for c in range(0,COLUMN_NUM):
                y = COLUMN_PAD+c*(BOX_HEIGHT+COLUMN_PAD)
                name = '%d,%d'% (c,r)
                # Note: if the fill option was omitted, the rectangle
                # would be created empty. This means that only the
                # outline would have generated events
                id = self.canvas.create_rectangle(x, y,
                                                  x+BOX_WIDTH, y+BOX_HEIGHT,
                                                  fill=BOX_COLOR, tags=('box', name ) )
                # The text item carries the same name so the box can be
                # found again from the text (see get_current_box).
                self.canvas.create_text(x+BOX_WIDTH/2,y+BOX_HEIGHT/2,
                                        anchor='c', text=name,
                                        tags='text' )
        # Bind handlers once via the implicit 'all' tag.
        self.canvas.tag_bind('all', '<Any-Enter>', self.enter_callback )
        self.canvas.tag_bind('all', '<Any-Leave>', self.leave_callback )
        self.canvas.tag_bind('all', '<1>', self.select_callback )
        self.canvas.tag_bind('all', '<3>', self.scanmark_callback )
        self.canvas.tag_bind('all', '<B3-Motion>', self.scandragto_callback )
    def get_current_box(self):
        """Return the id of the box under the pointer, mapping a text
        item back to its enclosing rectangle via the shared name tag."""
        id, = self.canvas.find_withtag('current')
        tags = self.canvas.gettags('current')
        ## print 'tags of selected ='+`tags`
        if 'text' in tags: # the current item is the text in the box
            id = self.canvas.find_withtag(self.canvas.itemcget(id,'text'))
        return id
    def enter_callback(self, event):
        # Highlight the box under the pointer.
        id = self.get_current_box()
        self.canvas.itemconfigure(id, fill=CURRENTBOX_COLOR)
    def leave_callback(self,event):
        # Restore the default box colour when the pointer leaves.
        id = self.get_current_box()
        self.canvas.itemconfigure(id, fill=BOX_COLOR)
    def scanmark_callback( self,event ):
        # Anchor point for button-3 drag-scrolling.
        self.canvas.scan_mark(event.x, event.y)
    def scandragto_callback(self,event):
        # Scroll the canvas as button-3 drags.
        self.canvas.scan_dragto(event.x, event.y)
    def select_callback( self,event):
        """Show a dialog naming the clicked box."""
        id = self.get_current_box()
        tags=self.canvas.gettags('current')
        # The box name is whichever tag is neither 'current' nor 'box'.
        # NOTE(review): box_name would be unbound if no such tag existed.
        for t in tags:
            if t not in ('current','box'):
                box_name=t
        messagebox.showinfo(
            title='Box clicked',
            message='You have clicked on box:'+box_name)
class ScrollCanvasDemo ( DemoWindow ):
    """Demo window hosting a ScrollableCanvas plus explanatory text."""
    def __init__(self):
        # Help text shown by the demo framework.
        l = """This window displays a canvas widget that can be
scrolled either using the scrollbars or by dragging with
button 2 in the canvas. If you click button 1 on one of
the rectangles, its indices will displayed.
You can also drag with button 3 to scroll the canvas.
"""
        DemoWindow.__init__(self,l,demo_path('canvasscroll.py') )
        self.canvas = ScrollableCanvas(self)
        self.canvas.pack(expand='Y', fill='both' )
# Alias used by the demo framework to launch this demo.
runDemo = ScrollCanvasDemo
## ----------------------------------------------------------------------------
if __name__ == '__main__':
    # Standalone entry point: create the demo window and start Tk's loop.
    demo = ScrollCanvasDemo()
    mainloop()
|
from parser import parse_indentation
def main(rawdata):
    """Run indentation parsing over *rawdata*.

    The parse result is discarded -- presumably parse_indentation acts
    via side effects; confirm against the parser module.
    """
    parse_indentation(rawdata)
|
from ..constants import DatabaseType, EntityIdStr
def create_ner_type_anatomy(
    id: str,
    name: str,
    synonym: str
) -> dict:
    """Build an anatomy NER entity record (MESH id type)."""
    record = {EntityIdStr.ANATOMY.value: id}
    record.update(id_type=DatabaseType.MESH.value, name=name, synonym=synonym)
    return record
def create_ner_type_chemical(id: str, name: str, synonym: str) -> dict:
    """Build a chemical NER entity record (CHEBI id type)."""
    record = {EntityIdStr.CHEMICAL.value: id}
    record.update(id_type=DatabaseType.CHEBI.value, name=name, synonym=synonym)
    return record
def create_ner_type_compound(id: str, name: str, synonym: str) -> dict:
    """Build a compound NER entity record (BioCyc id type)."""
    record = {EntityIdStr.COMPOUND.value: id}
    record.update(id_type=DatabaseType.BIOCYC.value, name=name, synonym=synonym)
    return record
def create_ner_type_disease(id: str, name: str, synonym: str) -> dict:
    """Build a disease NER entity record (MESH id type)."""
    record = {EntityIdStr.DISEASE.value: id}
    record.update(id_type=DatabaseType.MESH.value, name=name, synonym=synonym)
    return record
def create_ner_type_food(
    id: str,
    name: str,
    synonym: str
) -> dict:
    """Build a food NER entity record (MESH id type)."""
    record = {EntityIdStr.FOOD.value: id}
    record.update(id_type=DatabaseType.MESH.value, name=name, synonym=synonym)
    return record
def create_ner_type_gene(
    name: str,
    synonym: str,
    data_source: str = DatabaseType.NCBI_GENE.value
) -> dict:
    """Build a gene NER entity record; id_type defaults to NCBI Gene.

    Note: genes carry no id field, only an id_type.
    """
    return dict(id_type=data_source, name=name, synonym=synonym)
def create_ner_type_phenomena(
    id: str,
    name: str,
    synonym: str
) -> dict:
    """Build a phenomena NER entity record (MESH id type)."""
    record = {EntityIdStr.PHENOMENA.value: id}
    record.update(id_type=DatabaseType.MESH.value, name=name, synonym=synonym)
    return record
def create_ner_type_phenotype(
    id: str,
    name: str,
    synonym: str
) -> dict:
    """Build a phenotype NER entity record (custom id type)."""
    record = {EntityIdStr.PHENOTYPE.value: id}
    record.update(id_type=DatabaseType.CUSTOM.value, name=name, synonym=synonym)
    return record
def create_ner_type_protein(name: str, synonym: str) -> dict:
    """Build a protein NER entity record (UniProt id type).

    The id field deliberately carries the protein *name* for now
    (JIRA LL-671); it will eventually be switched back to a protein id.
    """
    record = {EntityIdStr.PROTEIN.value: name}
    record.update(id_type=DatabaseType.UNIPROT.value, name=name, synonym=synonym)
    return record
def create_ner_type_species(
    id: str,
    name: str,
    synonym: str,
    category: str = 'Uncategorized',
) -> dict:
    """Build a species NER entity record (NCBI Taxonomy id type)."""
    record = {EntityIdStr.SPECIES.value: id}
    record.update(id_type=DatabaseType.NCBI_TAXONOMY.value,
                  category=category, name=name, synonym=synonym)
    return record
"""
None LMDB related entities
"""
def create_ner_type_company(id: str, name: str, synonym: str) -> dict:
    """Build a company NER entity record (no external id type)."""
    record = {EntityIdStr.COMPANY.value: id}
    record.update(id_type='', name=name, synonym=synonym)
    return record
def create_ner_type_entity(id: str, name: str, synonym: str) -> dict:
    """Build a generic NER entity record (no external id type)."""
    record = {EntityIdStr.ENTITY.value: id}
    record.update(id_type='', name=name, synonym=synonym)
    return record
def create_ner_type_lab_sample(id: str, name: str, synonym: str) -> dict:
    """Build a lab-sample NER entity record (no external id type)."""
    record = {EntityIdStr.LAB_SAMPLE.value: id}
    record.update(id_type='', name=name, synonym=synonym)
    return record
def create_ner_type_lab_strain(id: str, name: str, synonym: str) -> dict:
    """Build a lab-strain NER entity record (no external id type)."""
    record = {EntityIdStr.LAB_STRAIN.value: id}
    record.update(id_type='', name=name, synonym=synonym)
    return record
|
#
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import warnings
import tempfile
import joblib
import neptune
import pandas as pd
warnings.filterwarnings('ignore')
__all__ = [
'concat_experiments_on_channel',
'extract_project_progress_info',
'get_channel_columns',
'get_parameter_columns',
'get_property_columns',
'get_system_columns',
'strip_prefices',
'pickle_and_log_artifact',
'get_pickled_artifact'
]
def concat_experiments_on_channel(experiments, channel_name):
    """Combine one channel's values from many experiments into a dataframe.

    For each experiment that has *channel_name*, its numeric channel values
    are fetched and tagged with the experiment id, then all frames are
    concatenated row-wise (e.g. columns ['id', 'x_log_loss', 'y_log_loss']).

    Args:
        experiments(list): list of `neptune.experiments.Experiment` objects.
        channel_name(str): name of the channel for which to extract values.

    Returns:
        `pandas.DataFrame`: the per-experiment channel values stacked into
        one dataframe with an added 'id' column.

    Note:
        Experiments that do not contain *channel_name* are silently omitted.
    """
    frames = []
    for experiment in experiments:
        if channel_name not in experiment.get_channels().keys():
            continue
        frame = experiment.get_numeric_channels_values(channel_name)
        frame['id'] = experiment.id
        frames.append(frame)
    return pd.concat(frames, axis=0)
def extract_project_progress_info(leadearboard, metric_colname, time_colname='finished'):
    """Extract per-experiment progress information from a project leaderboard.

    Produces a frame with the experiment metric, the best metric to date
    ('metric_best'), the per-day total running time ('running_time_day')
    and the per-day experiment count ('experiment_count_day').  Typically
    fed to `plot_project_progress` from `neptunecontrib.viz.projects`.

    Args:
        leadearboard(`pandas.DataFrame`): the project experiment view,
            e.g. `project.get_leaderboard()`.
        metric_colname(str): name of the column containing the metric of interest.
        time_colname(str): name of the timestamp column, either 'finished'
            or 'created'. Default is 'finished'.

    Returns:
        `pandas.DataFrame`: columns ['id', 'metric', 'metric_best',
        'running_time', 'running_time_day', 'experiment_count_day',
        'owner', 'tags', 'timestamp', 'timestamp_day'].
    """
    base_columns = ['id', 'owner', 'running_time', 'tags']
    df = leadearboard[base_columns + [time_colname, metric_colname]]
    # Normalize the chosen time/metric columns to fixed names.
    df.columns = base_columns + ['timestamp', 'metric']
    # Pipeline of enrichment steps, applied in order.
    for step in (_prep_time_column,
                 _prep_metric_column,
                 _get_daily_running_time,
                 _get_daily_experiment_counts,
                 _get_current_best):
        df = step(df)
    ordered_columns = ['id', 'metric', 'metric_best', 'running_time',
                       'running_time_day', 'experiment_count_day',
                       'owner', 'tags', 'timestamp', 'timestamp_day']
    return df[ordered_columns]
def get_channel_columns(columns):
    """Filters leaderboard columns to get the channel column names.

    Args:
        columns(iterable): Iterable of leaderboard column names.

    Returns:
        list: A list of channel column names.
    """
    prefix = 'channel_'
    selected = []
    for name in columns:
        if name.startswith(prefix):
            selected.append(name)
    return selected
def get_parameter_columns(columns):
    """Filters leaderboard columns to get the parameter column names.

    Args:
        columns(iterable): Iterable of leaderboard column names.

    Returns:
        list: A list of parameter column names.
    """
    prefix = 'parameter_'
    selected = []
    for name in columns:
        if name.startswith(prefix):
            selected.append(name)
    return selected
def get_property_columns(columns):
    """Filters leaderboard columns to get the property column names.

    Args:
        columns(iterable): Iterable of leaderboard column names.

    Returns:
        list: A list of property column names.
    """
    prefix = 'property_'
    selected = []
    for name in columns:
        if name.startswith(prefix):
            selected.append(name)
    return selected
def get_system_columns(columns):
    """Filters leaderboard columns to get the system column names.

    System columns are those that are neither channel, parameter nor
    property columns.

    Args:
        columns(iterable): Iterable of leaderboard column names.

    Returns:
        list: A list of system column names.
    """
    # str.startswith accepts a tuple of prefixes.
    non_system_prefixes = ('channel_', 'parameter_', 'property_')
    return [col for col in columns if not col.startswith(non_system_prefixes)]
def strip_prefices(columns, prefices):
    """Strips given prefixes from leaderboard column names.

    Args:
        columns(iterable): Iterable of leaderboard column names.
        prefices(list): List of prefices to strip. You can choose one of
            ['channel_', 'parameter_', 'property_']

    Returns:
        list: A list of clean column names.
    """
    new_columns = []
    for col in columns:
        for prefix in prefices:
            if col.startswith(prefix):
                # BUGFIX: str.replace removed the prefix text *anywhere*
                # in the name (e.g. 'channel_log_channel_x' -> 'log_x');
                # only the leading prefix should be stripped.
                col = col[len(prefix):]
        new_columns.append(col)
    return new_columns
def get_filepaths(dirpath='.', extensions=None):
    """Creates a list of all the files with selected extensions.

    Args:
        dirpath(str): Folder from which all files with given extensions
            should be added to list.
        extensions(list(str) or None): All extensions with which files
            should be added to the list. Defaults to
            ['.py', '.yaml', '.yml'].

    Returns:
        list: A list of filepaths with given extensions that are in the
        directory or subdirecotries.

    Examples:
        Create experiment and track all .py files from given directory and subdirs::

            with neptune.create_experiment(upload_source_files=get_filepaths(extensions=['.py'])):
                neptune.send_metric('score', 0.97)
    """
    msg = """get_filepaths() is deprecated.
Starting from neptune-client==4.9 you can pass ['**/*.py*', '**/*.yaml*', '**/*.yml*']
to upload_source_files argument to upload all files with given extensions recursively.
Read more https://docs.neptune.ai/neptune-client/docs/project.html

get_filepaths() will be removed in future releases.
"""
    warnings.warn(msg, DeprecationWarning)
    if not extensions:
        # BUGFIX: 'yml' was missing its leading dot, so any filename merely
        # *ending* in the letters 'yml' (e.g. 'foo_xyml') was collected.
        extensions = ['.py', '.yaml', '.yml']
    files = []
    for root, _, filenames in os.walk(dirpath):
        for filename in filenames:
            if any(filename.endswith(ext) for ext in extensions):
                files.append(os.path.join(root, filename))
    return files
def pickle_and_log_artifact(obj, filename, experiment=None):
    """Pickle *obj* and upload it to Neptune as an artifact.

    Args:
        obj: Picklable object.
        filename(str): filename under which object will be saved.
        experiment(`neptune.experiments.Experiment`): Neptune experiment;
            when falsy, the global `neptune` module is used. Default is None.

    Examples:
        Create a RandomForest object and log it to Neptune::

            with neptune.create_experiment():
                rf = RandomForestClassifier()
                pickle_and_log_artifact(rf, 'rf')
    """
    target = experiment or neptune
    # Dump into a throwaway directory so only the upload persists.
    with tempfile.TemporaryDirectory() as tmp_dir:
        artifact_path = os.path.join(tmp_dir, filename)
        joblib.dump(obj, artifact_path)
        target.send_artifact(artifact_path)
def get_pickled_artifact(experiment, filename):
    """Download a pickled artifact from Neptune and return the Python object.

    The artifact is fetched into a temporary directory, unpickled with
    ``joblib`` and returned.

    Args:
        experiment(`neptune.experiments.Experiment`): Neptune experiment.
        filename(str): filename under which the object was saved in Neptune.

    Returns:
        The unpickled Python object.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        experiment.download_artifact(filename, tmp_dir)
        return joblib.load(os.path.join(tmp_dir, filename))
def _prep_time_column(progress_df):
progress_df['timestamp'] = pd.to_datetime(progress_df['timestamp'])
progress_df.sort_values('timestamp', inplace=True)
progress_df['timestamp_day'] = [d.date() for d in progress_df['timestamp']]
return progress_df
def _prep_metric_column(progress_df):
progress_df['metric'] = progress_df['metric'].astype(float)
progress_df.dropna(subset=['metric'], how='all', inplace=True)
return progress_df
def _get_daily_running_time(progress_df):
daily_counts = progress_df.groupby('timestamp_day').sum()['running_time'].reset_index()
daily_counts.columns = ['timestamp_day', 'running_time_day']
progress_df = pd.merge(progress_df, daily_counts, on='timestamp_day')
return progress_df
def _get_daily_experiment_counts(progress_df):
daily_counts = progress_df.groupby('timestamp_day').count()['metric'].reset_index()
daily_counts.columns = ['timestamp_day', 'experiment_count_day']
progress_df = pd.merge(progress_df, daily_counts, on='timestamp_day')
return progress_df
def _get_current_best(progress_df):
current_best = progress_df['metric'].cummax()
current_best = current_best.fillna(method='bfill')
progress_df['metric_best'] = current_best
return progress_df
|
import click
from kfk.commands.main import kfk
from kfk.commons import print_missing_options_for_command, create_temp_file
from kfk.kubectl_command_builder import Kubectl
from kfk.config import *
from kfk.constants import SpecialTexts
@click.option('-n', '--namespace', help='Namespace to use', required=True)
@click.option('--uninstall', 'is_uninstall', help='Uninstalls Strimzi Kafka Operator', is_flag=True)
@click.option('--install', 'is_install', help='Installs Strimzi Kafka Operator', is_flag=True)
@kfk.command()
def operator(is_install, is_uninstall, namespace):
    """Installs/Uninstalls Strimzi Kafka Operator"""
    # The install and uninstall paths walk the same manifest tree and differ
    # only in the kubectl verb, so both delegate to one helper (this resolves
    # the old "TODO: refactor here" duplication).
    if is_install:
        _run_kubectl_on_operator_files(namespace, lambda: Kubectl().apply())
    elif is_uninstall:
        _run_kubectl_on_operator_files(namespace, lambda: Kubectl().delete())
    else:
        print_missing_options_for_command("operator")


def _run_kubectl_on_operator_files(namespace, kubectl_factory):
    """Run a kubectl command (built per file by *kubectl_factory*) against every
    cluster-operator installation manifest under STRIMZI_PATH."""
    for directory_name, _dirs, files in os.walk("{strimzi_path}/install/cluster-operator/".format(
            strimzi_path=STRIMZI_PATH)):
        for file_name in files:
            file_path = os.path.join(directory_name, file_name)
            if SpecialTexts.OPERATOR_ROLE_BINDING in file_name:
                # RoleBinding manifests ship with a placeholder project name;
                # rewrite it to the target namespace via a temp file.
                with open(file_path) as file:
                    stream = file.read().replace(SpecialTexts.OPERATOR_MY_PROJECT, namespace)
                    temp_file = create_temp_file(stream)
                    file_path = temp_file.name
            os.system(kubectl_factory().from_file(file_path).namespace(namespace).build())
|
# tests/test_provider_davidji99_herokux.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:18:42 UTC)
def test_provider_import():
    """The herokux provider module must be importable."""
    import terrascript.provider.davidji99.herokux  # noqa: F401
def test_resource_import():
    """Every herokux resource class must be importable."""
    from terrascript.resource.davidji99.herokux import (  # noqa: F401
        herokux_app_container_release,
        herokux_app_github_integration,
        herokux_app_webhook,
        herokux_connect_mappings,
        herokux_data_connector,
        herokux_formation_alert,
        herokux_formation_autoscaling,
        herokux_kafka_consumer_group,
        herokux_kafka_mtls_iprule,
        herokux_kafka_topic,
        herokux_oauth_authorization,
        herokux_pipeline_ephemeral_apps_config,
        herokux_pipeline_github_integration,
        herokux_pipeline_member,
        herokux_postgres_backup_schedule,
        herokux_postgres_connection_pooling,
        herokux_postgres_credential,
        herokux_postgres_data_link,
        herokux_postgres_dataclip,
        herokux_postgres_dataclip_team_association,
        herokux_postgres_dataclip_user_association,
        herokux_postgres_maintenance_window,
        herokux_postgres_mtls,
        herokux_postgres_mtls_certificate,
        herokux_postgres_mtls_iprule,
        herokux_postgres_settings,
        herokux_privatelink,
        herokux_redis_config,
        herokux_redis_maintenance_window,
        herokux_scheduler_job,
        herokux_shield_private_space,
    )
def test_datasource_import():
    """Every herokux data source must be importable."""
    from terrascript.data.davidji99.herokux import (  # noqa: F401
        herokux_addons,
        herokux_kafka_mtls_iprules,
        herokux_postgres_mtls_certificate,
        herokux_registry_image,
    )
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.davidji99.herokux
#
# t = terrascript.provider.davidji99.herokux.herokux()
# s = str(t)
#
# assert 'https://github.com/davidji99/terraform-provider-herokux' in s
# assert '0.30.3' in s
|
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import numpy as np
# DATA IRIS: use petal length and petal width (columns 2 and 3) only.
iris = datasets.load_iris()
X, y = iris.data[:, [2, 3]], iris.target
print('Class labels:', np.unique(y))
# Stratified 70/30 split keeps the class proportions in both partitions.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=1, stratify=y)
print('Label count:', np.bincount(y), np.bincount(y_train), np.bincount(y_test))
# Standardize features with statistics estimated on the training split only.
sc = StandardScaler().fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
# Stacked train+test versions, e.g. for plotting decision regions.
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
|
# AUTOGENERATED: DO NOT EDIT
# Last update date: 2022-02-05 01:36:27.062469
from ctypes import *
from utils import *
MAXWELL_CHANNEL_GPFIFO_A: int = 0xB06F
NVB06F_NUMBER_OF_SUBCHANNELS: int = 0x8
NVB06F_SET_OBJECT: int = 0x0
NVB06F_SET_OBJECT_ENGINE_SW: int = 0x1F
NVB06F_ILLEGAL: int = 0x4
NVB06F_NOP: int = 0x8
NVB06F_SEMAPHOREA: int = 0x10
NVB06F_SEMAPHOREB: int = 0x14
NVB06F_SEMAPHOREC: int = 0x18
NVB06F_SEMAPHORED: int = 0x1C
NVB06F_SEMAPHORED_OPERATION_ACQUIRE: int = 0x1
NVB06F_SEMAPHORED_OPERATION_RELEASE: int = 0x2
NVB06F_SEMAPHORED_OPERATION_ACQ_GEQ: int = 0x4
NVB06F_SEMAPHORED_OPERATION_ACQ_AND: int = 0x8
NVB06F_SEMAPHORED_OPERATION_REDUCTION: int = 0x10
NVB06F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED: int = 0x0
NVB06F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED: int = 0x1
NVB06F_SEMAPHORED_RELEASE_WFI_EN: int = 0x0
NVB06F_SEMAPHORED_RELEASE_WFI_DIS: int = 0x1
NVB06F_SEMAPHORED_RELEASE_SIZE_16BYTE: int = 0x0
NVB06F_SEMAPHORED_RELEASE_SIZE_4BYTE: int = 0x1
NVB06F_SEMAPHORED_REDUCTION_MIN: int = 0x0
NVB06F_SEMAPHORED_REDUCTION_MAX: int = 0x1
NVB06F_SEMAPHORED_REDUCTION_XOR: int = 0x2
NVB06F_SEMAPHORED_REDUCTION_AND: int = 0x3
NVB06F_SEMAPHORED_REDUCTION_OR: int = 0x4
NVB06F_SEMAPHORED_REDUCTION_ADD: int = 0x5
NVB06F_SEMAPHORED_REDUCTION_INC: int = 0x6
NVB06F_SEMAPHORED_REDUCTION_DEC: int = 0x7
NVB06F_SEMAPHORED_FORMAT_SIGNED: int = 0x0
NVB06F_SEMAPHORED_FORMAT_UNSIGNED: int = 0x1
NVB06F_NON_STALL_INTERRUPT: int = 0x20
NVB06F_FB_FLUSH: int = 0x24
NVB06F_MEM_OP_C: int = 0x30
NVB06F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE: int = 0x0
NVB06F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL: int = 0x1
NVB06F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE: int = 0x0
NVB06F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE: int = 0x1
NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET_VID_MEM: int = 0x0
NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET_SYS_MEM_COHERENT: int = 0x2
NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET_SYS_MEM_NONCOHERENT: int = 0x3
NVB06F_MEM_OP_D: int = 0x34
NVB06F_MEM_OP_D_OPERATION_MEMBAR: int = 0x5
NVB06F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE: int = 0x9
NVB06F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE: int = 0xD
NVB06F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE: int = 0xE
NVB06F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS: int = 0xF
NVB06F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY: int = 0x10
NVB06F_SET_REFERENCE: int = 0x50
NVB06F_WFI: int = 0x78
NVB06F_WFI_SCOPE_CURRENT_SCG_TYPE: int = 0x0
NVB06F_WFI_SCOPE_ALL: int = 0x1
NVB06F_CRC_CHECK: int = 0x7C
NVB06F_YIELD: int = 0x80
NVB06F_YIELD_OP_NOP: int = 0x0
NVB06F_YIELD_OP_PBDMA_TIMESLICE: int = 0x1
NVB06F_YIELD_OP_RUNLIST_TIMESLICE: int = 0x2
NVB06F_YIELD_OP_TSG: int = 0x3
NVB06F_GP_ENTRY__SIZE: int = 0x8
NVB06F_GP_ENTRY0_FETCH_UNCONDITIONAL: int = 0x0
NVB06F_GP_ENTRY0_FETCH_CONDITIONAL: int = 0x1
NVB06F_GP_ENTRY1_PRIV_USER: int = 0x0
NVB06F_GP_ENTRY1_PRIV_KERNEL: int = 0x1
NVB06F_GP_ENTRY1_LEVEL_MAIN: int = 0x0
NVB06F_GP_ENTRY1_LEVEL_SUBROUTINE: int = 0x1
NVB06F_GP_ENTRY1_SYNC_PROCEED: int = 0x0
NVB06F_GP_ENTRY1_SYNC_WAIT: int = 0x1
NVB06F_GP_ENTRY1_OPCODE_NOP: int = 0x0
NVB06F_GP_ENTRY1_OPCODE_ILLEGAL: int = 0x1
NVB06F_GP_ENTRY1_OPCODE_GP_CRC: int = 0x2
NVB06F_GP_ENTRY1_OPCODE_PB_CRC: int = 0x3
NVB06F_DMA_TERT_OP_GRP0_INC_METHOD: int = 0x0
NVB06F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK: int = 0x1
NVB06F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK: int = 0x2
NVB06F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK: int = 0x3
NVB06F_DMA_TERT_OP_GRP2_NON_INC_METHOD: int = 0x0
NVB06F_DMA_SEC_OP_GRP0_USE_TERT: int = 0x0
NVB06F_DMA_SEC_OP_INC_METHOD: int = 0x1
NVB06F_DMA_SEC_OP_GRP2_USE_TERT: int = 0x2
NVB06F_DMA_SEC_OP_NON_INC_METHOD: int = 0x3
NVB06F_DMA_SEC_OP_IMMD_DATA_METHOD: int = 0x4
NVB06F_DMA_SEC_OP_ONE_INC: int = 0x5
NVB06F_DMA_SEC_OP_RESERVED6: int = 0x6
NVB06F_DMA_SEC_OP_END_PB_SEGMENT: int = 0x7
NVB06F_DMA_INCR_OPCODE_VALUE: int = 0x1
NVB06F_DMA_NONINCR_OPCODE_VALUE: int = 0x3
NVB06F_DMA_ONEINCR_OPCODE_VALUE: int = 0x5
NVB06F_DMA_NOP: int = 0x0
NVB06F_DMA_IMMD_OPCODE_VALUE: int = 0x4
NVB06F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE: int = 0x1
NVB06F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE: int = 0x2
NVB06F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE: int = 0x3
NVB06F_DMA_ENDSEG_OPCODE_VALUE: int = 0x7
NVB06F_DMA_OPCODE3_NONE: int = 0x0
NVB06F_DMA_OPCODE_METHOD: int = 0x0
NVB06F_DMA_OPCODE_NONINC_METHOD: int = 0x2
def NVB06F_SET_OBJECT_NVCLASS(value: int) -> int:
return set_bits(0, 15, value)
def NVB06F_SET_OBJECT_ENGINE(value: int) -> int:
return set_bits(16, 4, value)
def NVB06F_ILLEGAL_HANDLE(value: int) -> int:
return set_bits(0, 31, value)
def NVB06F_NOP_HANDLE(value: int) -> int:
return set_bits(0, 31, value)
def NVB06F_SEMAPHOREA_OFFSET_UPPER(value: int) -> int:
return set_bits(0, 7, value)
def NVB06F_SEMAPHOREB_OFFSET_LOWER(value: int) -> int:
return set_bits(2, 29, value)
def NVB06F_SEMAPHOREC_PAYLOAD(value: int) -> int:
return set_bits(0, 31, value)
def NVB06F_SEMAPHORED_OPERATION(value: int) -> int:
return set_bits(0, 4, value)
def NVB06F_SEMAPHORED_ACQUIRE_SWITCH(value: int) -> int:
return set_bits(12, 0, value)
def NVB06F_SEMAPHORED_RELEASE_WFI(value: int) -> int:
return set_bits(20, 0, value)
def NVB06F_SEMAPHORED_RELEASE_SIZE(value: int) -> int:
return set_bits(24, 0, value)
def NVB06F_SEMAPHORED_REDUCTION(value: int) -> int:
return set_bits(27, 3, value)
def NVB06F_SEMAPHORED_FORMAT(value: int) -> int:
return set_bits(31, 0, value)
def NVB06F_NON_STALL_INTERRUPT_HANDLE(value: int) -> int:
return set_bits(0, 31, value)
def NVB06F_FB_FLUSH_HANDLE(value: int) -> int:
return set_bits(0, 31, value)
def NVB06F_MEM_OP_C_OPERAND_LOW(value: int) -> int:
return set_bits(2, 29, value)
def NVB06F_MEM_OP_C_TLB_INVALIDATE_PDB(value: int) -> int:
return set_bits(0, 0, value)
def NVB06F_MEM_OP_C_TLB_INVALIDATE_GPC(value: int) -> int:
return set_bits(1, 0, value)
def NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET(value: int) -> int:
return set_bits(10, 1, value)
def NVB06F_MEM_OP_C_TLB_INVALIDATE_ADDR_LO(value: int) -> int:
return set_bits(12, 19, value)
def NVB06F_MEM_OP_D_OPERAND_HIGH(value: int) -> int:
return set_bits(0, 7, value)
def NVB06F_MEM_OP_D_OPERATION(value: int) -> int:
return set_bits(27, 4, value)
def NVB06F_MEM_OP_D_TLB_INVALIDATE_ADDR_HI(value: int) -> int:
return set_bits(0, 7, value)
def NVB06F_SET_REFERENCE_COUNT(value: int) -> int:
return set_bits(0, 31, value)
def NVB06F_WFI_SCOPE(value: int) -> int:
return set_bits(0, 0, value)
def NVB06F_CRC_CHECK_VALUE(value: int) -> int:
return set_bits(0, 31, value)
def NVB06F_YIELD_OP(value: int) -> int:
return set_bits(0, 1, value)
def NVB06F_GP_ENTRY0_FETCH(value: int) -> int:
return set_bits(0, 0, value)
def NVB06F_GP_ENTRY0_GET(value: int) -> int:
return set_bits(2, 29, value)
def NVB06F_GP_ENTRY0_OPERAND(value: int) -> int:
return set_bits(0, 31, value)
def NVB06F_GP_ENTRY1_GET_HI(value: int) -> int:
return set_bits(0, 7, value)
def NVB06F_GP_ENTRY1_PRIV(value: int) -> int:
return set_bits(8, 0, value)
def NVB06F_GP_ENTRY1_LEVEL(value: int) -> int:
return set_bits(9, 0, value)
def NVB06F_GP_ENTRY1_LENGTH(value: int) -> int:
return set_bits(10, 20, value)
def NVB06F_GP_ENTRY1_SYNC(value: int) -> int:
return set_bits(31, 0, value)
def NVB06F_GP_ENTRY1_OPCODE(value: int) -> int:
return set_bits(0, 7, value)
def NVB06F_DMA_METHOD_ADDRESS_OLD(value: int) -> int:
return set_bits(2, 10, value)
def NVB06F_DMA_METHOD_ADDRESS(value: int) -> int:
return set_bits(0, 11, value)
def NVB06F_DMA_SUBDEVICE_MASK(value: int) -> int:
return set_bits(4, 11, value)
def NVB06F_DMA_METHOD_SUBCHANNEL(value: int) -> int:
return set_bits(13, 2, value)
def NVB06F_DMA_TERT_OP(value: int) -> int:
return set_bits(16, 1, value)
def NVB06F_DMA_METHOD_COUNT_OLD(value: int) -> int:
return set_bits(18, 10, value)
def NVB06F_DMA_METHOD_COUNT(value: int) -> int:
return set_bits(16, 12, value)
def NVB06F_DMA_IMMD_DATA(value: int) -> int:
return set_bits(16, 12, value)
def NVB06F_DMA_SEC_OP(value: int) -> int:
return set_bits(29, 2, value)
def NVB06F_DMA_INCR_ADDRESS(value: int) -> int:
return set_bits(0, 11, value)
def NVB06F_DMA_INCR_SUBCHANNEL(value: int) -> int:
return set_bits(13, 2, value)
def NVB06F_DMA_INCR_COUNT(value: int) -> int:
return set_bits(16, 12, value)
def NVB06F_DMA_INCR_OPCODE(value: int) -> int:
return set_bits(29, 2, value)
def NVB06F_DMA_INCR_DATA(value: int) -> int:
return set_bits(0, 31, value)
def NVB06F_DMA_NONINCR_ADDRESS(value: int) -> int:
return set_bits(0, 11, value)
def NVB06F_DMA_NONINCR_SUBCHANNEL(value: int) -> int:
return set_bits(13, 2, value)
def NVB06F_DMA_NONINCR_COUNT(value: int) -> int:
return set_bits(16, 12, value)
def NVB06F_DMA_NONINCR_OPCODE(value: int) -> int:
return set_bits(29, 2, value)
def NVB06F_DMA_NONINCR_DATA(value: int) -> int:
return set_bits(0, 31, value)
def NVB06F_DMA_ONEINCR_ADDRESS(value: int) -> int:
return set_bits(0, 11, value)
def NVB06F_DMA_ONEINCR_SUBCHANNEL(value: int) -> int:
return set_bits(13, 2, value)
def NVB06F_DMA_ONEINCR_COUNT(value: int) -> int:
return set_bits(16, 12, value)
def NVB06F_DMA_ONEINCR_OPCODE(value: int) -> int:
return set_bits(29, 2, value)
def NVB06F_DMA_ONEINCR_DATA(value: int) -> int:
return set_bits(0, 31, value)
def NVB06F_DMA_IMMD_ADDRESS(value: int) -> int:
return set_bits(0, 11, value)
def NVB06F_DMA_IMMD_SUBCHANNEL(value: int) -> int:
return set_bits(13, 2, value)
def NVB06F_DMA_IMMD_DATA(value: int) -> int:
return set_bits(16, 12, value)
def NVB06F_DMA_IMMD_OPCODE(value: int) -> int:
return set_bits(29, 2, value)
def NVB06F_DMA_SET_SUBDEVICE_MASK_VALUE(value: int) -> int:
return set_bits(4, 11, value)
def NVB06F_DMA_SET_SUBDEVICE_MASK_OPCODE(value: int) -> int:
return set_bits(16, 15, value)
def NVB06F_DMA_STORE_SUBDEVICE_MASK_VALUE(value: int) -> int:
return set_bits(4, 11, value)
def NVB06F_DMA_STORE_SUBDEVICE_MASK_OPCODE(value: int) -> int:
return set_bits(16, 15, value)
def NVB06F_DMA_USE_SUBDEVICE_MASK_OPCODE(value: int) -> int:
return set_bits(16, 15, value)
def NVB06F_DMA_ENDSEG_OPCODE(value: int) -> int:
return set_bits(29, 2, value)
def NVB06F_DMA_ADDRESS(value: int) -> int:
return set_bits(2, 10, value)
def NVB06F_DMA_SUBCH(value: int) -> int:
return set_bits(13, 2, value)
def NVB06F_DMA_OPCODE3(value: int) -> int:
return set_bits(16, 1, value)
def NVB06F_DMA_COUNT(value: int) -> int:
return set_bits(18, 10, value)
def NVB06F_DMA_OPCODE(value: int) -> int:
return set_bits(29, 2, value)
def NVB06F_DMA_DATA(value: int) -> int:
return set_bits(0, 31, value)
class _clb06f_tag0(Structure):
    """ctypes layout of the B06F (Maxwell GPFIFO) channel-control structure.

    ``IgnoredNN`` members are padding between the exposed registers; the
    offsets and sizes come from the autogenerated NVIDIA class headers.
    NOTE(review): field semantics (pushbuffer Put/Get, GPFIFO GPGet/GPPut)
    are presumed from the register names — confirm against the clb06f header.
    """
    _fields_ = [
        ("Ignored00", c_int * 16),
        ("Put", c_int),
        ("Get", c_int),
        ("Reference", c_int),
        ("PutHi", c_int),
        ("Ignored01", c_int * 2),
        ("TopLevelGet", c_int),
        ("TopLevelGetHi", c_int),
        ("GetHi", c_int),
        ("Ignored02", c_int * 7),
        ("Ignored03", c_int),
        ("Ignored04", c_int * 1),
        ("GPGet", c_int),
        ("GPPut", c_int),
        ("Ignored05", c_int * 92),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Extractor for regional and provincial GeoDataframes, given into the required format
for the generation of Geopandas maps.
@author: riccardomaldini
"""
import geopandas as gpd
import pandas as pd
# Constants
# NOTE(review): both GeoJSON files are downloaded from GitHub at import time,
# so importing this module requires network access and may be slow.
RAW_REG_ITALY = gpd.read_file('https://raw.githubusercontent.com/openpolis/geojson-italy/master/geojson/limits_IT_regions.geojson')
RAW_PROV_ITALY = gpd.read_file('https://raw.githubusercontent.com/openpolis/geojson-italy/master/geojson/limits_IT_provinces.geojson')
def extract_regions_geodf():
    """
    Creates a GeoDataframe of italian regions, indexed by ISTAT code, with the
    Trento and Bolzano provinces replacing Trentino Alto Adige.

    :rtype: GeoDataframe
    """
    # Regions plus the two autonomous provinces (ISTAT 21 = Bolzano/Trento pair).
    extra_provinces = [RAW_PROV_ITALY[RAW_PROV_ITALY['prov_istat_code_num'] == code]
                       for code in (21, 22)]
    regions = pd.concat([RAW_REG_ITALY] + extra_provinces, ignore_index=True)

    # Give each of the two provinces its own region code.
    def _region_code(row):
        if row['prov_istat_code_num'] == 21:
            return '21'
        if row['prov_istat_code_num'] == 22:
            return '22'
        return row['reg_istat_code']

    regions['reg_istat_code'] = regions.apply(_region_code, axis=1)
    regions['codice_regione'] = regions['reg_istat_code']
    # Drop the original Trentino Alto Adige entry (region code '04').
    regions = regions[regions['codice_regione'] != '04']
    return regions
def extract_provinces_geodf():
    """
    Creates a GeoDataframe including italian provinces, indexed by ISTAT code.

    :rtype: GeoDataframe
    """
    # Work on a copy: the original assigned a new column directly on the shared
    # RAW_PROV_ITALY constant, silently mutating module-level state.
    prov_italy = RAW_PROV_ITALY.copy()
    # Format as text
    prov_italy['codice_provincia'] = prov_italy['prov_istat_code']
    return prov_italy
# Pre-computed data. Use this to avoid re-generating the data structure each time
# (both are built once when the module is imported).
regions_geodf = extract_regions_geodf()
provinces_geodf = extract_provinces_geodf()
|
# Amazon Bot
# Objective: Use browser automation to order an item from Amazon and have it delivered.
# WORK IN PROGRESS - NOT FINISHED YET
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
import time
import config
url = 'http://amazon.com'

# Opens Amazon site.
# NOTE(review): hard-coded chromedriver path; if omitted, selenium searches PATH.
browser = webdriver.Chrome('/Users/thor/Downloads/chromedriver')
browser.get(url)

# Sign into Amazon account; credentials live in config.py, out of source control.
print('Signing into Amazon account')
sign_in = browser.find_element_by_id('nav-link-accountList')
sign_in.click()
browser.implicitly_wait(3)
enter_email = browser.find_element_by_id('ap_email')
enter_email.click()
enter_email.send_keys(config.email)
continue_button = browser.find_element_by_id('continue')
continue_button.click()
enter_pw = browser.find_element_by_id('ap_password')
enter_pw.click()
enter_pw.send_keys(config.pw)
sign_in_submit = browser.find_element_by_id('signInSubmit')
sign_in_submit.click()
print('Signed into account successfully')

# Search for the item by keyword.
search_item = 'chapstick'
search_box = browser.find_element_by_id('twotabsearchtextbox')
search_box.click()
print('Searching for item...')
search_box.send_keys(search_item)
search_submit = browser.find_element_by_id('nav-search-submit-button')
search_submit.click()

# Item: Blistex Medicated Lip Balm, 0.15 Ounce (Pack of 3)
browser.implicitly_wait(3)
chapstick = browser.find_element_by_link_text('Blistex Medicated Lip Balm, 0.15 Ounce (Pack of 3)')
chapstick.click()

# One-time purchase selection
one_time_purchase1 = browser.find_element_by_class_name('a-declarative')
one_time_purchase1.click()
print('Selected one-time purchase successfully')
done_button = browser.find_element_by_name('glowDoneButton')
done_button.click()
print('Clicked on DONE button successfully')

try:
    one_time_purchase = browser.find_element_by_id('newAccordionRow')
    # Repeated clicks: presumably the accordion can re-collapse — TODO confirm.
    one_time_purchase.click()
    one_time_purchase.click()
    one_time_purchase.click()
    print('Selected one-time purchase successfully AGAIN')
except Exception as error:
    # BUG FIX: the original wrote `browser.stop_client` without parentheses,
    # a no-op attribute access; actually invoke it.
    browser.stop_client()
    print('FAILED to select one time purchase')
    print(error)

try:
    browser.implicitly_wait(5)
    add_to_cart = browser.find_element_by_id('add-to-cart-button')
    add_to_cart.click()
    print('Added item to cart')
except Exception as error:
    print('FAILED to add item to cart')
    print(error)

# Checkout and place the order.
browser.implicitly_wait(5)
proceed_to_checkout = browser.find_element_by_id('hlb-ptc-btn-native')
proceed_to_checkout.click()
place_order = browser.find_element_by_name('placeYourOrder1')
place_order.click()
print('Order placed successfully!')
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import unicode_literals, print_function, division, absolute_import
import argparse
import csv
import os
from .data import absolute_path
from nlp_architect.utils.io import validate_existing_directory
tratz2011_train_labeled_dict = {
False: [
55,
64,
67,
68,
100,
104,
121,
150,
444,
492,
782,
798,
878,
942,
952,
967,
990,
1012,
1031,
1036,
1371,
1658,
1679,
1717,
1719,
2845,
2904,
3454,
3921,
4040,
4059,
4123,
4334,
4435,
4512,
4834,
4890,
4902,
4919,
4923,
4925,
4926,
4932,
4988,
5004,
5078,
5081,
5083,
5094,
5197,
5208,
5246,
5315,
5342,
5409,
5440,
5446,
5656,
5678,
5684,
5688,
5713,
5719,
5723,
5764,
5776,
5779,
5848,
6038,
6049,
6115,
6116,
6136,
6140,
6209,
6236,
6297,
6331,
6334,
6471,
6478,
6490,
6731,
6734,
6736,
6737,
6740,
6798,
6811,
6836,
6837,
6841,
6847,
6848,
6853,
6899,
6900,
6940,
6947,
7003,
7011,
7101,
7107,
7125,
7189,
7330,
7393,
7400,
7500,
7504,
7512,
7591,
7594,
7615,
7742,
7765,
7842,
7935,
8546,
8613,
8621,
8642,
8658,
8665,
8741,
8792,
8823,
8863,
8876,
8878,
9169,
9210,
9277,
9280,
9341,
9403,
9435,
9483,
9517,
9600,
9697,
9749,
9807,
9818,
9842,
9906,
10098,
10161,
10194,
10273,
10315,
10350,
10351,
10396,
10441,
10463,
10468,
10490,
10492,
10497,
10509,
10510,
10537,
10581,
10589,
10616,
10737,
10751,
10922,
10944,
10960,
11007,
11013,
11021,
11181,
11188,
11255,
11297,
11322,
11444,
11464,
11466,
11469,
11474,
11499,
11608,
11629,
11636,
11679,
11692,
11747,
11792,
11865,
11894,
11898,
11908,
12032,
12045,
12046,
12067,
12109,
12173,
12207,
12222,
12385,
12386,
12398,
12408,
12472,
12556,
12669,
12677,
12679,
12755,
12788,
12809,
12818,
12822,
12844,
13031,
13041,
13120,
13122,
13127,
13147,
13160,
13161,
13186,
13188,
13189,
13194,
13214,
13236,
13318,
13368,
13421,
13453,
13456,
13505,
13535,
13583,
13584,
13611,
13653,
13691,
13692,
13701,
13737,
13797,
14007,
14029,
14047,
14066,
14069,
14092,
],
True: [
60,
207,
247,
258,
268,
362,
456,
485,
641,
694,
1025,
1272,
1304,
1317,
1542,
1602,
1643,
1746,
1908,
1909,
2028,
2302,
3424,
3627,
4113,
4398,
4399,
4542,
4759,
4836,
4849,
4957,
5041,
5126,
5306,
5630,
5661,
5708,
5791,
5971,
5983,
6142,
6548,
7293,
7416,
7923,
8932,
9700,
10486,
10746,
10803,
11448,
11781,
12072,
12308,
12354,
12368,
12470,
12510,
12647,
12662,
12766,
12821,
12879,
13494,
14014,
14018,
14020,
14091,
],
}
# Validation-split row indices, keyed by whether the noun phrase at that
# index is a collocation (True) or not (False).
tratz2011_val_labeled_dict = {
    False: [12, 16, 17, 18, 19, 21, 23, 24, 25, 26, 27, 28, 29, 32, 40, 42, 43, 45, 47, 48],
    True: [33, 121, 240, 365, 425],
}
def rebuild_row(lst, is_collocation):
    """
    Re-construct a csv row as expected by data.py `read_csv_file_data` method

    Args:
        lst (list(str)): list whose first entry is a tab-separated string
        is_collocation (bool): True when the phrase is a collocation

    Returns:
        list(str): two entries - the space-joined noun phrase and "1"/"0"
    """
    parts = lst[0].split("\t")
    phrase = parts[0] + " " + parts[1]
    label = "1" if is_collocation else "0"
    return [phrase, label]
def read_from_tratz_2011(file_full_path, labeled_dict):
    """
    Read a tratz_2011 file and write the re-formatted csv next to it

    Args:
        file_full_path (str): file path
        labeled_dict (dict): maps False/True to the row indices with that label
    """
    # 1. read the data, stripping any NUL bytes that break the csv reader
    with open(file_full_path, "r", encoding="utf-8-sig") as input_file:
        cleaned_lines = (line.replace("\0", "") for line in input_file)
        all_rows = list(csv.reader(cleaned_lines))
    csv_data = []
    for index, row in enumerate(all_rows):
        # keep the False-labeled version before the True-labeled one,
        # mirroring the order the labels were originally emitted in
        for label in (False, True):
            if index in labeled_dict[label]:
                csv_data.append(rebuild_row(row, label))
    # 2. write to csv file
    write_csv(csv_data, file_full_path)
def write_csv(data, output):
    """
    Write csv data

    Args:
        output (str): source file path; its last three characters (e.g. "tsv")
            are replaced with "csv" to form the output path
        data (list(list(str))):
            the csv formatted rows to write

    """
    output_path = output[:-3] + "csv"
    # newline="" is required by the csv module so the writer fully controls
    # line endings (prevents doubled \r on Windows).
    with open(output_path, "w", encoding="utf-8", newline="") as out_file:
        writer = csv.writer(out_file, delimiter=",", quotechar='"')
        print("CSV file is saved in {0}".format(output_path))
        for result_row in data:
            writer.writerow(result_row)
def preprocess_tratz_2011(folder_path):
    """
    Pre-process tratz_2011 dataset

    Args:
        folder_path (str): path to the unzipped tratz_2011 dataset
    """
    targets = [
        ("tratz2011_coarse_grained_random/train.tsv", tratz2011_train_labeled_dict),
        ("tratz2011_coarse_grained_random/val.tsv", tratz2011_val_labeled_dict),
    ]
    # 1. get abs path (handles default-value / relative-path usage)
    if not os.path.isabs(folder_path):
        folder_path = os.path.join(os.path.dirname(__file__), folder_path)
    # 2. convert each split file in the folder
    for relative_path, labels in targets:
        read_from_tratz_2011(os.path.join(folder_path, relative_path), labels)
if __name__ == "__main__":
    # CLI entry point: convert the Tratz 2011 tsv splits to csv in place.
    parser = argparse.ArgumentParser(description="Pre-process Tratz 2011 data from tsv to csv")
    parser.add_argument(
        "--data",
        type=validate_existing_directory,
        help="path the Tratz_2011_dataset folder local path",
    )
    args = parser.parse_args()
    # Resolve to an absolute path before handing off to the pre-processor.
    data_path = absolute_path(args.data)
    preprocess_tratz_2011(data_path)
|
# -*- encoding: utf-8 -*-
"""Extension to the standard argparse module.
"""
import argparse
class ArgparseFormatter(argparse.ArgumentDefaultsHelpFormatter,
                        argparse.RawDescriptionHelpFormatter):
    """Mix both formatters: show option defaults in the generated help while
    keeping the description/epilog text's original line breaks."""
def update_options(options, other):
    """Copy every attribute of *other* onto *options*, in place."""
    for name, value in vars(other).items():
        setattr(options, name, value)
def parse_cli_args(parser, argv, base_options=None, known=False):
    """Parse *argv* with *parser* and return ``(options, rest)``.

    With ``known=True`` unknown arguments are tolerated and returned in
    *rest*; otherwise *rest* is empty.  Attributes of *base_options*, when
    given, override the parsed values.  A leading ``--`` separator is
    stripped from *rest*.
    """
    if known:
        options, rest = parser.parse_known_args(argv)
    else:
        options, rest = parser.parse_args(argv), []
    if base_options:
        update_options(options, base_options)
    chop_cmdsep(rest)
    return options, rest
def chop_cmdsep(args):
    """Drop a leading ``--`` command separator from *args*, in place."""
    if not args:
        return
    if args[0].strip() == "--":
        del args[0]
|
# -*- coding: iso-8859-1 -*-
from __future__ import print_function, division
import sys
if( sys.version_info[0] == 2 ):
    # Python 2 compatibility: alias the lazy xrange to range.
    range = xrange
import math
import qm3.maths.rand
import qm3.maths.matrix
import copy
def stats( v ):
    """Return ( mean, sample standard deviation ) of v.

    Accepts a bare float or a one-element sequence, for which the
    deviation is reported as 0.0.
    """
    if( type( v ) == float ):
        return( v, 0.0 )
    if( len( v ) == 1 ):
        return( v[0], 0.0 )
    n = float( len( v ) )
    m = sum( v ) / n
    s = math.sqrt( sum( [ (x-m)*(x-m) for x in v ] ) / ( n - 1.0 ) )
    return( m, s )
def autocorrelation( v, k = 1 ):
    """Lag-k autocorrelation of v; 0.0 when k >= len( v )."""
    n = len( v )
    if( k >= n ):
        return( 0.0 )
    m = sum( v ) / float( n )
    dev = [ x - m for x in v ]
    total = sum( [ d * d for d in dev ] )
    lagged = sum( [ dev[i] * dev[i+k] for i in range( n - k ) ] )
    return( lagged / total )
def pearson( x, y ):
    """Pearson correlation coefficient of x and y.

    1: correlated; 0: uncorrelated; -1: inv. correlated.
    Returns None when the two sequences differ in length.
    """
    n = len( x )
    if( len( y ) != n ):
        return( None )
    sx = sum( x )
    sy = sum( y )
    sx2 = sum( [ i * i for i in x ] )
    sy2 = sum( [ i * i for i in y ] )
    sxy = sum( [ i * j for i, j in zip( x, y ) ] )
    return( ( n * sxy - sx * sy ) / ( math.sqrt( n * sx2 - sx * sx ) * math.sqrt( n * sy2 - sy * sy ) ) )
def sampling_ratio( v ):
    """Statistical inefficiency of the data sequence v.

    The value of the sampling ratio that arises from any given data sequence
    is the factor by which the number of configurations sampled must be
    increased in order to obtain the same precision that would result from
    randomly distributed data points.
    """
    n = len( v )
    m = sum( v ) / float( n )
    dev = [ x - m for x in v ]
    o = sum( [ dev[i] * dev[i-1] for i in range( 1, n ) ] )
    o /= sum( [ d * d for d in dev ] )
    return( ( 1.0 + o ) / ( 1.0 - o ) )
# http://home.deib.polimi.it/matteucc/Clustering/tutorial_html/kmeans.html
# http://datasciencelab.wordpress.com/2014/01/15/improved-seeding-for-clustering-with-k-means/
def k_means( data, K ):
    """Partition *data* into K clusters; returns ( clusters, indexes ).

    clusters maps cluster id -> list of member points; indexes maps cluster
    id -> list of the members' positions in *data*.  Points may be floats or
    equal-length lists of floats.
    """
    def __dist( vi, vj ):
        # Squared euclidean distance for scalar or vector points.
        if( type( vi ) == list ):
            o = sum( [ ( vi[k] - vj[k] ) * ( vi[k] - vj[k] ) for k in range( len( vi ) ) ] )
        else:
            o = ( vi - vj ) * ( vi - vj )
        return( o )
    # k-means++-style seeding: each new center is drawn with probability
    # proportional to its squared distance from the nearest existing center.
    # NOTE(review): upper bound len(data)-2 — verify against the range
    # convention of qm3.maths.rand.randint (last element may be excluded).
    M = [ data[qm3.maths.rand.randint( 0, len( data ) - 2 )] ]
    while( len( M ) < K ):
        d2 = [ min( [ __dist( x, c ) for c in M ] ) for x in data ]
        s2 = sum( d2 )
        p = [ i / s2 for i in d2 ]
        # Cumulative distribution; pick the first entry past a uniform draw.
        c = [ sum( p[0:i+1] ) for i in range( len( p ) ) ]
        r = qm3.maths.rand.random()
        i = c.index( [ j for j in c if j >= r ][0] )
        M.append( data[i] )
    C = None
    I = None
    # Lloyd iterations: assign points to the nearest center and recompute
    # centroids until the set of centers stops changing (t == 0).
    o = [ data[i] for i in qm3.maths.rand.sample( range( len( data ) ), K ) ]
    if( type( M[0] ) == list ):
        t = len( set( sum( M, [] ) ).difference( set( sum( o, [] ) ) ) )
    else:
        t = len( set( M ).difference( set( o ) ) )
    while( C == None or t != 0 ):
        o = copy.deepcopy( M )
        C = {}
        I = {}
        for j in range( len( data ) ):
            # Index of the closest current center.
            w = min( [ ( __dist( data[j], M[i] ), i ) for i in range( len( M ) ) ] )[1]
            try:
                C[w].append( data[j] )
                I[w].append( j )
            except:
                C[w] = [ data[j] ]
                I[w] = [ j ]
        if( type( M[0] ) == list ):
            # Vector points: component-wise centroid of each cluster.
            n = len( M[0] )
            M = []
            for k in iter( C ):
                t = [ 0.0 for i in range( n ) ]
                for p in C[k]:
                    for j in range( n ):
                        t[j] += p[j]
                M.append( [ i / float( len( C[k] ) ) for i in t ] )
            t = len( set( sum( M, [] ) ).difference( set( sum( o, [] ) ) ) )
        else:
            # Scalar points: plain mean of each cluster.
            M = [ sum( C[k] ) / float( len( C[k] ) ) for k in iter( C ) ]
            t = len( set( M ).difference( set( o ) ) )
    return( C, I )
# - Principal Components Analysis
# X: [ [x1_N], ..., [xk_N] ] (vars:k, data:N)
#
class PCA( object ):
    def __init__( self, x ):
        """Center the data and diagonalize its covariance matrix.

        x: list of *var* rows, each a list with the same number (*dim*)
        of observations; raises when the rows are ragged.
        """
        self.var = len( x )
        self.dim = len( x[0] )
        if( sum( [ len( x[i] ) for i in range( self.var ) ] ) != self.var * self.dim ):
            raise Exception( "PCA: All variables (rows) have not the same dimensions" )
        # Per-variable means and the mean-centered data matrix.
        self.med = [ sum( x[i] ) / float( self.dim ) for i in range( self.var ) ]
        self.dat = [ [ x[i][j] - self.med[i] for j in range( self.dim ) ] for i in range( self.var ) ]
        # Covariance of the centered rows, built as upper-diagonal entries
        # and then expanded to a full symmetric matrix.
        cov = []
        for i in range( self.var ):
            for j in range( i, self.var ):
                cov.append( sum( [ self.dat[i][l] * self.dat[j][l] for l in range( self.dim ) ] ) / float( self.dim ) )
        cov = qm3.maths.matrix.from_upper_diagonal_rows( cov, self.var )
        try:
            # Prefer the compiled Jacobi eigensolver when available.
            self.val, self.vec, conv = qm3.maths._matrix.jacobi( cov, self.var )
            self.flg = True
        except:
            # diag SORTS eigenvalues: reduced systems can not be fully recovered from mean data...
            self.val, self.vec = qm3.maths.matrix.diag( cov, self.var )
            self.flg = False
    # Selection indexes: [ 0-k ]
    def select( self, sel, reduced = True ):
        """Project the data onto the eigenvectors chosen by *sel*.

        With reduced=True the projection is returned in the reduced space
        (means re-added only when the Jacobi solver was used); otherwise the
        data is reconstructed back in the original space.
        """
        # Keep only valid, unique, sorted eigenvector indexes.
        ind = sorted( list( set( [ i for i in sel if i >= 0 and i < self.var ] ) ) )
        red = len( ind )
        if( red == 0 ):
            raise Exception( "PCA: Invalid selection" )
        # Rows of the selected eigenvectors (red x var).
        mat = []
        for i in ind:
            for j in range( self.var ):
                mat.append( self.vec[j*self.var+i] )
        if( reduced ):
            out = qm3.maths.matrix.mult( mat, red, self.var, sum( self.dat, [] ), self.var, self.dim )
            if( self.flg ):
                for i in range( red ):
                    for j in range( self.dim ):
                        out[i*self.dim+j] += self.med[ind[i]]
        else:
            # Reconstruction: project and map back with the transposed basis.
            mat = qm3.maths.matrix.mult( qm3.maths.matrix.T( mat, red, self.var ), self.var, red, mat, red, self.var )
            out = qm3.maths.matrix.mult( mat, self.var, self.var, sum( self.dat, [] ), self.var, self.dim )
            for i in range( self.var ):
                for j in range( self.dim ):
                    out[i*self.dim+j] += self.med[i]
        return( out )
try:
    import numpy
    def np_kmeans( data, K ):
        """NumPy version of k_means: cluster the rows of *data* into K groups.

        Returns (C, I): C maps cluster index -> array of member rows and
        I maps cluster index -> list of the members' row indexes.
        """
        # k-means++ seeding: each new centroid drawn with probability
        # proportional to its squared distance to the nearest chosen one.
        M = [ data[numpy.random.randint(data.shape[0])] ]
        while( len( M ) < K ):
            d2 = numpy.array( [ min( [ numpy.power( numpy.linalg.norm( x - c ), 2.0 ) for c in M ] ) for x in data ] )
            cp = ( d2 / d2.sum() ).cumsum()
            r = numpy.random.random()
            M.append( data[numpy.where( cp >= r )[0][0]] )
        M = numpy.array( M )
        C = None
        I = None
        # 'o' holds the previous centroids; iterate until values stop changing
        o = data[numpy.random.choice( range( data.shape[0] ), K, replace = False )]
        while( C == None or numpy.setdiff1d( numpy.unique( o ), numpy.unique( M ) ).size != 0 ):
            o = M
            C = {}
            I = {}
            for j in range( data.shape[0] ):
                # index of the closest centroid for row j
                w = min( [ ( numpy.linalg.norm( data[j] - M[i] ), i ) for i in range( M.shape[0] ) ] )[1]
                try:
                    C[w].append( data[j] )
                    I[w].append( j )
                except:
                    C[w] = [ data[j] ]
                    I[w] = [ j ]
            M = numpy.array( [ numpy.mean( C[k], axis = 0 ) for k in iter( C ) ] )
        # NOTE(review): numpy.array is a function, not a type, so this check is
        # always False and the else branch always runs -- probably meant
        # numpy.ndarray (numpy.array(C[k]) already yields a 2-D array anyway).
        if( type( C[0][0] ) == numpy.array ):
            C = { k: numpy.array( C[k] ).reshape( ( len( C[k] ), len( C[k][0] ) ) ) for k in iter( C ) }
        else:
            C = { k: numpy.array( C[k] ) for k in iter( C ) }
        return( C, I )
    class np_PCA( object ):
        """NumPy version of PCA: rows are variables, columns are samples."""
        def __init__( self, data ):
            self.var = data.shape[0]   # number of variables
            self.dim = data.shape[1]   # samples per variable
            self.med = data.mean( axis = 1 )
            # mean-centered data and its covariance matrix
            self.dat = numpy.array( [ data[i,:] - self.med[i] for i in range( self.var ) ] )
            cov = numpy.dot( self.dat, self.dat.T ) / self.dim
            self.val, self.vec, conv = qm3.maths.matrix.np_jacobi( cov )
        def select( self, sel, reduced = True ):
            """Project the data onto the eigenvectors listed in *sel*.

            reduced=True gives the len(sel) x dim projection; otherwise the
            selection is back-projected into the full var x dim space.
            """
            ind = sorted( list( set( [ i for i in sel if i >= 0 and i < self.var ] ) ) )
            if( reduced ):
                out = numpy.dot( self.vec[:,ind].T ,self.dat )
                for i in range( len( ind ) ):
                    out[i,:] += self.med[ind[i]]
            else:
                out = numpy.dot( numpy.dot( self.vec[:,ind], self.vec[:,ind].T ), self.dat )
                for i in range( self.var ):
                    out[i,:] += self.med[i]
            return( out )
except:
    # numpy (or the qm3 jacobi helper) is unavailable: the np_* variants are
    # simply not defined.  NOTE(review): this bare except also hides real errors.
    pass
|
"""Tests for layers.deepr"""
import pytest
import tensorflow as tf
import numpy as np
import deepr as dpr
def test_layers_sum():
    """Sum layer adds its two inputs together."""
    output = dpr.layers.Sum()((tf.constant(1), tf.constant(2)))
    with tf.Session() as sess:
        assert sess.run(output) == 3
def test_layers_product():
    """Product layer multiplies its two inputs together."""
    output = dpr.layers.Product()((tf.constant(1), tf.constant(2)))
    with tf.Session() as sess:
        assert sess.run(output) == 2
@pytest.mark.parametrize(
    "left_dim, right_dim, expected_dim",
    [
        ([2], [2], []),
        ([2, 3, 5], [2, 5], [2, 3]),
        ([2, 3, 4, 5], [2, 5], [2, 3, 4]),
        ([2, 3, 5], [2, 3, 5], [2, 3]),
        ([2, 3, 4, 5], [2, 3, 5], [2, 3, 4]),
    ],
)
def test_layers_dot_product(left_dim, right_dim, expected_dim):
    """DotProduct layer matches a naive broadcast-and-reduce implementation."""

    def _reference(left, right):
        # Align ranks by inserting broadcast axes, then contract the last axis.
        for _ in range(len(left.shape) - len(right.shape)):
            right = tf.expand_dims(right, axis=-2)
        return tf.reduce_sum(left * right, axis=-1)

    left = tf.constant(np.random.random(left_dim))
    right = tf.constant(np.random.random(right_dim))
    layer_op = dpr.layers.DotProduct()((left, right))
    reference_op = _reference(left, right)
    with tf.Session() as sess:
        got = sess.run(layer_op)
        expected = sess.run(reference_op)
    assert list(got.shape) == expected_dim
    np.testing.assert_almost_equal(got, expected, decimal=7)
def test_layers_dense():
    """Dense layer: output shape is (batch, units); reuse=True shares weights."""
    layer = dpr.layers.Dense(16)
    first = layer(tf.ones([8, 8]))
    second = layer(tf.ones([8, 8]), reuse=True)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out_first = sess.run(first)
        out_second = sess.run(second)
    assert out_first.shape == (8, 16)
    np.testing.assert_equal(out_first, out_second)
def test_layers_conv1d():
    """Conv1d layer: filter count sets the last dim; reuse=True shares weights."""
    layer = dpr.layers.Conv1d(filters=5, kernel_size=1)
    first = layer(tf.ones([8, 8, 8]))
    second = layer(tf.ones([8, 8, 8]), reuse=True)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out_first = sess.run(first)
        out_second = sess.run(second)
    assert out_first.shape == (8, 8, 5)
    np.testing.assert_equal(out_first, out_second)
@pytest.mark.parametrize(
    "tensor, mask, expected",
    [
        # normal 1d case
        ([1, 1, 1, 1], [True, True, True, True], [0.25, 0.25, 0.25, 0.25]),
        # normal 2d case
        (
            [[1, 1, 1, 1], [1, 1, 1, 1]],
            [[True, True, True, True], [True, True, True, True]],
            [[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]],
        ),
        # case with mask
        ([1, 1, 1, 1], [True, True, False, False], [0.5, 0.5, 0, 0]),
        # case with mask
        ([10_000, 0, 0, 0], [True, True, False, False], [1, 0, 0, 0]),
    ],
)
def test_layers_softmax(tensor, mask, expected):
    """Test for Softmax layer: masked positions get zero probability."""
    tensor = tf.constant(tensor, dtype=tf.float32)
    mask = tf.constant(mask, dtype=tf.bool)
    # np.float was a deprecated alias for the builtin float and was removed
    # in NumPy 1.24; use the builtin directly.
    expected = np.array(expected, dtype=float)
    results = dpr.layers.Softmax()((tensor, mask))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        got = sess.run(results)
    np.testing.assert_equal(expected, got)
|
from django.db import models
# Create your models here.
class Log(models.Model):
    """One logged request: a _from/to pair, the answer, and a creation time."""
    # 'from' is a Python keyword, hence the leading underscore in the field name.
    _from = models.CharField(max_length=255, null=False, blank=False)
    to = models.CharField(max_length=255, null=False, blank=False)
    # q = models.IntegerField(null=False, blank=False)
    ans = models.CharField(max_length=255, null=False, blank=False)
    # set once, automatically, when the row is first saved
    created_at = models.DateTimeField(null=False,auto_now_add=True)
    def __str__(self):
        return "{} {}".format(self._from, self.to)
|
# apps/posts/models.py
# Django modules
from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
# Define User
User = get_user_model()
# Author model
class Author(models.Model):
    """Blog author profile, attached one-to-one to a Django user."""
    # Author has OneToOne relationship with the User model:
    # an author belongs to exactly one user.
    user = models.OneToOneField(User,on_delete=models.CASCADE)
    profile_picture = models.ImageField()
    class Meta:
        verbose_name = 'Author'
        verbose_name_plural = 'Authors'
    def __str__(self):
        return self.user.username
# Category model
class Category(models.Model):
    """A top-level grouping for posts (and a parent for tags)."""
    title = models.CharField(max_length=50)
    class Meta:
        verbose_name = 'Category'
        verbose_name_plural = 'Categories'
    def __str__(self):
        return self.title
# Tag model
class Tag(models.Model):
    """A label for posts, scoped under a single category."""
    title = models.CharField(max_length=50)
    # Tag has ManyToOne relationship with Category:
    # many tags can belong to a category.
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    class Meta:
        verbose_name = 'Tag'
        verbose_name_plural = 'Tags'
    def __str__(self):
        return self.title
# Post model
class Post(models.Model):
    """A blog post with author, categories, tags and view/comment counters."""
    title = models.CharField(max_length=100)
    slug = models.SlugField(max_length=250)
    overview = models.TextField()
    # set once, automatically, when the post is first saved
    timestamp = models.DateTimeField(auto_now_add=True)
    comment_count = models.IntegerField(default=0)
    view_count = models.IntegerField(default=0)
    # Post has ManyToOne relationship with Author:
    # many posts can belong to an author.
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
    thumbnail = models.ImageField()
    # Post has ManyToMany relationship with Category:
    # a post can have many categories.
    categories = models.ManyToManyField(Category)
    # Post has ManyToMany relationship with Tag:
    # a post can have many tags.
    tags = models.ManyToManyField(Tag)
    featured = models.BooleanField()
    class Meta:
        verbose_name = 'Post'
        verbose_name_plural = 'Posts'
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        """Canonical URL of this post's detail page."""
        return reverse('posts:post_single', kwargs={
            'id':self.id
        })
# Gallery images
class Gallery(models.Model):
    """A dated gallery image with an optional title."""
    title = models.CharField(max_length=100, blank=True, null=True)
    # uploads are bucketed by date under MEDIA_ROOT/gallery/
    image = models.ImageField(upload_to='gallery/%Y/%m/%d')
    timestamp = models.DateTimeField(auto_now_add=True)
    class Meta:
        verbose_name = 'Gallery'
        verbose_name_plural = 'Galleries'
    def __str__(self):
        # title is nullable (blank=True, null=True); returning None from
        # __str__ raises TypeError, so fall back to a placeholder string.
        return self.title or 'Untitled gallery'
import sublime_plugin
import re
from sublime_lib import show_selection_panel
# Mapping from case-style name to a function that joins a list of
# lower-case words into that style.
TRANSFORMS = {
    "hyphen": lambda words: '-'.join(words),                                       # foo-bar
    "underscore": lambda words: '_'.join(words),                                   # foo_bar
    "shouty": lambda words: '_'.join([word.upper() for word in words]),            # FOO_BAR
    "pascal": lambda words: ''.join([word.capitalize() for word in words]),        # FooBar
    "camel": lambda words: ''.join(words[:1] + [word.capitalize() for word in words[1:]]),  # fooBar
}
def split_words(s):
    """Yield the lower-cased words of *s*, splitting on whitespace, '_', '-'
    and lowercase-to-uppercase camelCase boundaries."""
    boundaries = [
        (match.start(), match.end())
        for match in re.finditer(r'\s+|_|-|(?<=[a-z])(?=[A-Z])', s)
    ]
    begin = 0
    for start, end in boundaries:
        yield s[begin:start].lower()
        begin = end
    yield s[begin:].lower()
class RecaseSelectionCommand(sublime_plugin.TextCommand):
    """Rewrite every selected region in the case style named by *case*.

    *case* must be one of the TRANSFORMS keys ('hyphen', 'underscore',
    'shouty', 'pascal', 'camel').
    """
    def run(self, edit, case=None):
        for region in self.view.sel():
            original = self.view.substr(region)
            words = list(split_words(original))
            self.view.replace(edit, region, TRANSFORMS[case](words))
class TransformWordsCommand(sublime_plugin.TextCommand):
    """Show a quick panel of case styles (previewed on the first selection)
    and apply the chosen style to every selection."""
    def run(self, edit):
        first_selection = self.view.substr(self.view.sel()[0])
        # One panel row per style: [display name, preview of the result].
        items = [
            [name.capitalize(), function(list(split_words(first_selection)))]
            for name, function in TRANSFORMS.items()
        ]

        def replace(item):
            name, _ = item
            # The display name was capitalized for the panel, but TRANSFORMS
            # (and recase_selection's 'case' argument) use lower-case keys;
            # passing the capitalized name caused a KeyError.
            self.view.run_command('recase_selection', {'case': name.lower()})

        show_selection_panel(self.view.window(), items, on_select=replace)
|
import math
def get_factor_list(n):
    """Return the sorted list of factors of the positive integer *n*.

    Uses trial division: 1 is always a factor, candidate divisors only need
    to be checked up to n // 2, and n itself is appended after the loop.
    """
    if n == 1:
        # special case: otherwise 1 would be listed twice ([1, 1])
        return [1]
    factors = [1]
    for candidate in range(2, n // 2 + 1):
        if n % candidate == 0:
            factors.append(candidate)
    factors.append(n)
    return factors
def factors(n):
    """Return an iterator over the factors of *n* (see get_factor_list)."""
    return iter(get_factor_list(n))
if __name__ == '__main__':
    # quick smoke test over a few sample values
    for value in (100, 1000, 386945):
        print(list(factors(value)))
import typing
import torch
class LogitData(typing.TypedDict, total=False):
    """Raw per-head network outputs (logits); every key is optional."""
    mask: torch.Tensor
    quaternion: torch.Tensor
    scales: torch.Tensor
    z: torch.Tensor
    xy: torch.Tensor
class CategoricalData(typing.TypedDict, total=False):
    """Same keys as LogitData, holding the categorical-form counterparts of
    the heads; every key is optional."""
    mask: torch.Tensor
    quaternion: torch.Tensor
    scales: torch.Tensor
    z: torch.Tensor
    xy: torch.Tensor
class AggData(typing.TypedDict, total=False):
    """Aggregated per-instance data: ids plus pose/feature tensors
    (every key is optional)."""
    # Meta Data
    class_ids: torch.Tensor
    sample_ids: torch.Tensor
    # Feature Data
    instance_masks: torch.Tensor
    quaternion: torch.Tensor
    scales: torch.Tensor
    z: torch.Tensor
    xy: torch.Tensor
    # R/T/RT: presumably rotation, translation and the combined transform
    # -- TODO(review) confirm against the producing code
    R: torch.Tensor
    T: torch.Tensor
    RT: torch.Tensor
class MatchedData(typing.TypedDict, total=False):
    """Per-instance data after matching, including symmetry ids
    (every key is optional)."""
    # Meta Data
    class_ids: torch.Tensor
    sample_ids: torch.Tensor
    symmetric_ids: torch.Tensor
    instance_masks: torch.Tensor
    quaternion: torch.Tensor
    scales: torch.Tensor
    z: torch.Tensor
    xy: torch.Tensor
    # R/T/RT: presumably rotation, translation and the combined transform
    # -- TODO(review) confirm against the producing code
    R: torch.Tensor
    T: torch.Tensor
    RT: torch.Tensor
# Load module function in Python is changed
# to look for a libmacro.so in LD_LIBRARY_PATH
import sst
import sst.macro
# Nominal near-zero latency for links that should add no meaningful delay.
smallLatency = "1ps"
def getParam(params, paramName, paramNS=None):
    """Look up *paramName* in *params*, aborting the configuration on a miss.

    paramNS is only used to make the error message more specific.
    """
    if paramName not in params:
        import sys
        if paramNS:  # was misspelled 'paramNs': a NameError masked the real error
            sys.stderr.write("Missing parameter '%s' in namespace '%s'\n" % (paramName, paramNS))
        else:
            # the old format string passed two values for a single placeholder
            sys.stderr.write("Missing parameter '%s'\n" % (paramName,))
        raise Exception("failed configuring SST/macro")
    return params[paramName]
def getParamNamespace(params, ns, parentNs=None):
    """Return the nested namespace dict *ns* from *params*, aborting on a miss.

    parentNs is only used to make the error message more specific.
    """
    if ns not in params:
        import sys
        if parentNs:  # was misspelled 'parentNS': a NameError masked the real error
            sys.stderr.write("Missing parameter namespace '%s' in namespace '%s'\n" % (ns, parentNs))
        else:
            sys.stderr.write("Missing parameter namespace '%s'\n" % (ns))
        raise Exception("failed configuring SST/macro")
    return params[ns]
def getNestedParamNamespace(params, *xargs):
    """Descend through the namespaces named in *xargs*, returning the innermost."""
    nestedNs = ""
    nextParams = params
    for entry in xargs:
        if entry not in nextParams:
            import sys  # was missing: the error path raised NameError instead
            sys.stderr.write("Missing parameter namespace %s in params %s\n" % (entry, nestedNs))
            raise Exception("failed configuring SST/macro")
        nextParams = nextParams[entry]
        nestedNs += "%s." % entry
    return nextParams
def makeUniLink(linkType,srcComp,srcId,srcPort,dstComp,dstId,dstPort,outLat=None,inLat=None):
    """Create a one-way SST link from (srcComp, srcPort) to (dstComp, dstPort).

    Either latency may be omitted; the one given is then used for both ends.
    """
    # default each missing latency to the other one
    if not outLat: outLat = inLat
    if not inLat: inLat = outLat
    if not outLat:
        import sys  # was missing: this error path raised NameError
        sys.exit("must specify at least one latency for link")
    linkName = "%s%d:%d->%d:%d" % (linkType,srcId,srcPort,dstId,dstPort)
    link = sst.Link(linkName)
    portName = "output%d" % (srcPort)
    srcComp.addLink(link,portName,outLat)
    portName = "input%d" % (dstPort)
    dstComp.addLink(link,portName,inLat)
def makeBiLink(linkType,comp1,id1,port1,comp2,id2,port2,outLat=None,inLat=None):
    """Create a bidirectional link as two unidirectional links."""
    makeUniLink(linkType,comp1,id1,port1,comp2,id2,port2,outLat,inLat)
    makeUniLink(linkType,comp2,id2,port2,comp1,id1,port1,outLat,inLat)
def makeUniNetworkLink(srcComp,srcId,srcPort,dstComp,dstId,dstPort,outLat=None,inLat=None):
    """One-way link of type 'network' (switch-to-switch traffic)."""
    makeUniLink("network",srcComp,srcId,srcPort,dstComp,dstId,dstPort,outLat,inLat)
def makeBiNetworkLink(comp1,id1,port1,comp2,id2,port2,outLat=None,inLat=None):
    """Two-way link of type 'network' (switch-to-switch traffic)."""
    makeBiLink("network",comp1,id1,port1,comp2,id2,port2,outLat,inLat)
def addNew(prefix, kw, newDict, oldDict):
    """Copy oldDict[kw] into newDict under the dotted key 'prefix.kw'."""
    newDict["%s.%s" % (prefix, kw)] = oldDict[kw]
def addParams(prefix, dict, *xargs, **kwargs):
    """Merge parameter sources into *dict* under the dotted namespace *prefix*.

    Positional sources may be plain dicts (copied key by key) or callables
    taking (prefix, dict); keyword arguments are copied directly.
    """
    for source in xargs:
        if isinstance(source, type({})):
            for key in source:
                addNew(prefix, key, dict, source)
        else:
            # callable source installs its own entries (see subParams)
            source(prefix, dict)
    for key in kwargs:
        addNew(prefix, key, dict, kwargs)
def addSubParams(oldPrefix, newPrefix, dict, *xargs, **kwargs):
    """Add params under the combined namespace 'oldPrefix.newPrefix'."""
    addParams("%s.%s" % (oldPrefix, newPrefix), dict, *xargs, **kwargs)
def subParams(prefix, *xargs, **kwargs):
    """Return a callback that installs these params under an extra namespace."""
    def apply(parentPrefix, paramDict):
        addSubParams(parentPrefix, prefix, paramDict, *xargs, **kwargs)
    return apply
def redoSubParams_impl(nsArr, theDict, allParams):
    """Flatten a nested dict into dotted (key, value) pairs.

    nsArr is the namespace path accumulated so far; results are appended
    to the *allParams* list in place.
    """
    for key, val in theDict.items():
        path = nsArr + [key]
        if isinstance(val, dict):
            redoSubParams_impl(path, val, allParams)
        else:
            allParams.append((".".join(path), val))
def macroToCoreParams(theDict):
    """Convert nested macro-style params into a flat dotted-key dict."""
    flattened = []
    redoSubParams_impl([], theDict, flattened)
    return dict(flattened)
class Interconnect:
    """Builds the SST component graph (nodes, switches, links) for sst-macro."""
    def __init__(self, params):
        self.params = params
        self.system = sst.macro.System(params)
        self.num_nodes = self.system.numNodes()
        self.num_switches = self.system.numSwitches()
        # filled in by buildSwitches() / buildEndpoints()
        self.switches = [0]*self.num_switches
        self.nodes = [0]*self.num_nodes
    def numNodes(self):
        return self.num_nodes
    def numSwitches(self):
        return self.num_switches
    def defaultEpFxn(self, nodeID):
        """Default endpoint factory: build a '*_node' component for nodeID."""
        nodeParams = getParamNamespace(self.params, "node")
        topParams = getParamNamespace(self.params,"topology")
        compName = getParam(nodeParams, "name", "node").lower()
        if not compName.endswith("_node"):
            compName += "_node"
        node = sst.Component("Node %d" % nodeID, "macro.%s" % compName)
        node.addParams(macroToCoreParams(nodeParams))
        node.addParams(macroToCoreParams(topParams))
        node.addParam("id", nodeID)
        return node
    def buildSwitches(self):
        """Instantiate one '*_switch' component per switch id."""
        for i in range(self.num_switches):
            switchParams = getParamNamespace(self.params, "switch")
            compName = getParam(switchParams, "name", "switch").lower()
            if not compName.endswith("_switch"):
                compName += "_switch"
            switch = sst.Component("Switch %d" % i, "macro.%s" % compName)
            switch.addParams(macroToCoreParams(switchParams))
            switch.addParam("id", i)
            self.switches[i] = (switch, switchParams)
    def buildEndpoints(self, epFxn):
        """Instantiate every node through the factory *epFxn*."""
        for i in range(self.num_nodes):
            self.nodes[i] = epFxn(i)
    def latency(self, params):
        """Return the 'latency' entry of *params*, exiting if absent."""
        if "latency" in params:
            return params["latency"]
        else:
            import sys
            sys.exit("need link latency in parameters")
    def latencyAsFloat(self, params):
        """Parse a latency string such as '1.5us' into seconds."""
        import re
        import sys  # was missing: the failure path below raised NameError
        lat = self.latency(params)
        match = re.compile(r"(\d+[.]?\d*)(.*)").search(lat)
        if not match:
            sys.exit("improperly formatted latency %s" % lat)
        num, units = match.groups()
        num = float(num)
        units = units.strip().lower()
        if units == "ms":
            num *= 1e-3
        elif units == "us":
            num *= 1e-6
        elif units == "ns":
            num *= 1e-9
        elif units == "ps":
            num *= 1e-12
        return num
    def connectSwitches(self):
        """Wire up switch-to-switch network links from the system topology."""
        switchParams = getParamNamespace(self.params, "switch")
        for i in range(self.num_switches):
            linkParams = getParamNamespace(switchParams, "link", "switch")
            connections = self.system.switchConnections(i)
            srcSwitch, params = self.switches[i]
            lat = self.latency(linkParams)
            for srcId, dstId, srcOutport, dstInport in connections:
                dstSwitch, dstParams = self.switches[dstId]
                makeUniNetworkLink(srcSwitch,srcId,srcOutport,
                                   dstSwitch,dstId,dstInport,
                                   lat)
    def connectEndpoints(self):
        """Wire each node to its switch (injection) and back (ejection)."""
        lat = ""
        latNs = getNestedParamNamespace(self.params,"node","nic","injection")
        lat = getParam(latNs, "latency")
        for swId in range(self.num_switches):
            connections = self.system.injectionConnections(swId)
            for epId, switchPort, injPort in connections:
                ep = self.nodes[epId]
                injSwitchComp, params = self.switches[swId]
                makeUniLink("injection",ep,epId,injPort,injSwitchComp,swId,switchPort,lat)
            connections = self.system.ejectionConnections(swId)
            for epId, switchPort, ejPort, in connections:
                ep = self.nodes[epId]
                ejSwitchComp, params = self.switches[swId]
                makeUniLink("ejection",ejSwitchComp,swId,switchPort,ep,epId,ejPort,
                            outLat=lat,inLat=smallLatency)
    # Construct LogP short circuit network for small messages
    # sst-macro uses one LogP switch per simulation rank, but using
    # a single-switch "star" topology here since elements aren't supposed to
    # know anything about simulation parallelism and it greatly simplifies
    # sst-core support. We may want to revisit this decision if it proves
    # to be a performance bottleneck for MPI parallel simulations.
    def buildLogPNetwork(self):
        nproc = sst.getMPIRankCount() * sst.getThreadCount()
        switchParams = self.params["switch"]
        if "logp" in switchParams:
            switchParams = switchParams["logp"]
        lat = switchParams["out_in_latency"]
        switch = sst.Component("LogP 0", "macro.logp_switch")
        switch.addParams(macroToCoreParams(switchParams))
        switch.addParam("id", 0)
        # node -> LogP switch (injection side)
        for i in range(self.num_nodes):
            ep = self.nodes[i]
            linkName = "logPinjection%d->%d" % (i, 0)
            link = sst.Link(linkName)
            portName = "output%d" % (sst.macro.NICLogPInjectionPort)
            ep.addLink(link, portName, smallLatency) #put no latency here
            portName = "input%d" % i
            switch.addLink(link, portName, smallLatency)
        # LogP switch -> node (ejection side)
        for i in range(self.num_nodes):
            ep = self.nodes[i]
            linkName = "logPejection%d->%d" % (0, i)
            link = sst.Link(linkName)
            portName = "output%d" % i
            switch.addLink(link, portName, lat)
            portName = "input%d" % (sst.macro.NICLogPInjectionPort)
            ep.addLink(link, portName, lat)
    def buildFull(self, epFxn):
        """Build the full network: switches, endpoints and all links."""
        self.buildSwitches()
        self.buildEndpoints(epFxn)
        self.connectSwitches()
        self.connectEndpoints()
        self.buildLogPNetwork()
    def buildLogP(self, epFxn):
        """Build only the LogP star network (no detailed switches)."""
        self.buildEndpoints(epFxn)
        self.buildLogPNetwork()
    def build(self, epFxn=None):
        """Build the interconnect, choosing LogP-only or full topology."""
        if epFxn is None:
            epFxn = self.defaultEpFxn
        if self.system.isLogP():
            self.buildLogP(epFxn)
        else:
            self.buildFull(epFxn)
def readCmdLineParams():
    """Parse the sst-macro parameters given on the command line."""
    import sys
    return sst.macro.readParams(sys.argv)
def setupDeprecatedParams(params, debugList=None):
    """Build an Interconnect from an old-style flat parameter dict.

    Moves app/topology entries into the node/switch namespaces, imports any
    external (non-builtin) app modules, then builds and returns the
    Interconnect.  *debugList* entries are joined into the node 'debug' param.
    """
    # avoid the mutable-default-argument pitfall of the old signature
    debugList = debugList if debugList is not None else []
    nodeParams = getParamNamespace(params, "node")
    swParams = getParamNamespace(params, "switch")
    builtinApps = [
        "apitest",
        "global_test",
        "hello_world",
        "mpi_coverage",
        "mpi_ping_all",
        "mpi_print_nodes",
        "mpi_topology",
        "parsedumpi",
        "parseotf2",
        "sstmac_mpi_testall",
        "traffic_matrix",
        "UserAppCxxEmptyMain",
        "UserAppCxxFullMain",
    ]
    # app0..app9 namespaces are moved under the node namespace
    for i in range(10):
        ns = "app%d" % i
        if ns in params:
            appParams = params[ns]
            nodeParams[ns] = appParams
            appName = appParams["name"]
            if appName not in builtinApps:
                # external app: import its component library so SST can find it
                cmd = "import sst.%s" % appName
                exec(cmd)
            del params[ns]
    icParams = {}
    topParams = getParamNamespace(params,"topology")
    icParams["topology"] = topParams
    nodeParams["interconnect"] = icParams
    nodeParams["topology"] = topParams
    if debugList:
        nodeParams["debug"] = "[" + ",".join(debugList) + "]"
    swParams["topology"] = topParams
    # move every string param in the global namespace
    # into the individual namespaces
    for ns in "node", "switch":
        nsParams = params[ns]
        for key in params:
            val = params[key]
            if isinstance(val, str):
                if key not in nsParams:
                    nsParams[key] = val
    ic = Interconnect(params)
    ic.build()
    return ic
def setupDeprecated():
    """Entry point for old-style configuration: read params and debug flags
    from the command line, then build and return the Interconnect."""
    print ("setupDeprecated")
    import sys
    sst.setProgramOption("timebase", "100as")
    params = readCmdLineParams()
    debugList = []
    if "debug" in params:
        debugList = params["debug"].strip().split()
    # -d/--debug command-line flags extend the debug list
    for i in range(len(sys.argv)):
        if sys.argv[i] == "-d" or sys.argv[i] == "--debug":
            debugList.extend(sys.argv[i+1].split(","))
    return setupDeprecatedParams(params, debugList)
|
import os
import time
import random
from libs import engineLib as engine
class Event(object):##these are events, where the majority of the Engines power comes from, events can print, add/remove items to the room and player, and teleport the player to a new location without informing them. Each command can only be used once it seems.
    """Scripted game event: runs a sequence of named actions (PRINT, ADDITEM,
    TELEPORT, ...) against the current location and player character, then
    returns control to a Scene or a Conversation.

    NOTE(review): indentation was lost in the reviewed copy; the nesting of the
    Repeat >= 0 / Repeat <= -1 branches below is the most literal reading and
    should be confirmed against the original file.
    """
    activeNPC = "none"
    def __init__(self, Location, Character, EventActions, EventOrder, Repeat, bToConversation, NPC):
        self.Location = Location                  # room the event acts on
        self.Character = Character                # player character
        self.EventActions = EventActions          # action name -> argument(s)
        self.EventOrder = EventOrder              # order in which actions run
        self.Repeat = Repeat                      # remaining repetitions; negative appears to mean "always run"
        self.bToConversation = bToConversation    # end in a Conversation instead of a Scene
        self.NPC = NPC                            # NPC name used for the Conversation case
    def triggerEvent(self, activeLocation, activeCharacter):##this runs through all the event items
        self.Location = activeLocation
        self.Character = activeCharacter
        if(self.Repeat >= 0):
            for e in self.EventOrder:
                if(e != "EVENT"):
                    if(e != "RANDOMEVENT"):
                        # ordinary action: dispatch to the method named by e
                        engine.stringToClassDef(self, e)(self.EventActions[e])
                        time.sleep(0.1)
                    else:
                        self.Repeat -= 1
                        engine.stringToClassDef(self, e)(self.EventActions[e])
                else:
                    # nested event: trigger it with the current location/character
                    self.Repeat -= 1
                    engine.stringToClassDef(engine.stringToClass(self.EventActions[e]), "triggerEvent")(self.Location, self.Character)
            self.Repeat -= 1
            if(self.bToConversation == False):
                engine.Scene(self.Location, self.Character)
            else:
                engine.Conversation(self.Location, self.Character, engine.stringToClass(self.NPC), engine.stringToClass(self.NPC).Convo["intro"], engine.stringToClass(self.NPC).Convo["intro"])
        if(self.Repeat <= -1):
            for e in self.EventOrder:
                if(e != "EVENT"):
                    if(e != "RANDOMEVENT"):
                        engine.stringToClassDef(self, e)(self.EventActions[e])
                        time.sleep(0.1)
                    else:
                        self.Repeat -= 1
                        engine.stringToClassDef(self, e)(self.EventActions[e])
                else:
                    engine.stringToClassDef(engine.stringToClass(self.EventActions[e]), "triggerEvent")(self.Location, self.Character)
            if(self.bToConversation == False):
                engine.Scene(self.Location, self.Character)
            else:
                engine.Conversation(self.Location, self.Character, engine.stringToClass(self.NPC), engine.stringToClass(self.NPC).Convo["intro"], engine.stringToClass(self.NPC).Convo["intro"])
        else:
            if(self.bToConversation == False):
                engine.Scene(self.Location, self.Character)
            else:
                engine.Conversation(self.Location, self.Character, engine.stringToClass(self.NPC), engine.stringToClass(self.NPC).Convo["intro"], engine.stringToClass(self.NPC).Convo["intro"])
    def PRINT(self, text):##Call to print something to screen.
        print(text)
    def ADDTOINVENTORY(self, item):##Call to add an item to the player character
        self.Character.addToInventory(item[0], item[1])
    def REMOVEFROMINVENTORY(self, item):##Call to remove an item from the player character
        self.Character.removeFromInventory(item[0], item[1])
    def ADDITEM(self, item):##Call to add an item to the surround area
        self.Location.addItem(item[0], item[1])
    def REMOVEITEM(self, item):##Call to remove an item from the surrounding area
        self.Location.removeItem(item[0], item[1])
    def TELEPORT(self, newLocation):##Call to teleport the player to a different room without telling them. Good for making a room 'change'
        self.Location = engine.stringToClass(newLocation)
    def ADDEXIT(self, newExit):##This adds exits to the Location
        for x in newExit:
            self.Location.addExit(x, newExit[x])
    def REMOVEEXIT(self, delExit):##This removes and exit from the Location
        self.Location.removeExit(delExit)
    def WAIT(self, waitText):##Prints waitText and waits for input, does not save input. Use this for walls of text/page turning etc. I dont like the current functionality, but its the only way to actually make it reliable.
        os.system("echo %s" % (waitText))
        os.system("pause")
    def ADDSTRUCTURE(self, structure):##Adds a structure to the room
        self.Location.addStructure(structure)
    def REMOVESTRUCTURE(self, structure):## Removes a structure from the room.
        # NOTE(review): 'removeStucture' is likely a typo for 'removeStructure';
        # confirm which name the Location class actually defines.
        self.Location.removeStucture(structure)
    def ADDNPC(self, NPC):##Adds NPC to zone
        self.Location.addNPC(NPC)
    def REMOVENPC(self, NPC):##Removes an NPC from the zone
        self.Location.removeNPC(NPC)
    def ADDTONPCINVENTORY(self, item):##Adds item to the active NPCs inventory
        engine.stringToClass(item[0]).addToInventory(item[1], item[2])
    def REMOVEFROMNPCINVENTORY(self, item):##Removes item from active NPC inventory.
        engine.stringToClass(item[0]).removeFromInventory(item[1], item[2])
    def RANDOMEVENT(self, eventList):##Rolls through a list of events and picks one at random.
        engine.stringToClassDef(engine.stringToClass(eventList[random.randint(0, len(eventList)-1)]), "triggerEvent")(self.Location, self.Character)
    def MODIFYPCHP(self, mod):
        self.Character.HP += mod
    def MODIFYPCSP(self, mod):
        self.Character.SP += mod
    def MODIFYPCMP(self, mod):
        self.Character.MP += mod
    def MODIFYPCMIND(self, mod):
        self.Character.Mind += mod
    def MODIFYPCBODY(self, mod):
        self.Character.Body += mod
    def MODIFYPCSPIRIT(self, mod):
        self.Character.Spirit += mod
    def SETPCHP(self, mod):
        self.Character.HP = mod
    def SETPCSP(self, mod):
        self.Character.SP = mod
    def SETPCMP(self, mod):
        self.Character.MP = mod
    def SETPCMIND(self, mod):
        self.Character.Mind = mod
    def SETPCBODY(self, mod):
        self.Character.Body = mod
    def SETPCSPIRIT(self, mod):
        self.Character.Spirit = mod
from .shopify_checker_view import is_shopify_shop_view |
"""Recipe for deploying demoapp"""
from fabric.api import execute
from fabric.decorators import task
from orchalib import get_current_release_dir, get_app_basedir
from orchalib import tasks
@task
def print_app_version():
    """Print the currently deployed version info for the app."""
    # fabric's execute fans the task out to the configured hosts
    execute(tasks.print_app_version, 'demoapp')
@task
def service_restart():
    """Restart services for this app."""
    execute(tasks.service_restart, 'demoapp')
@task
def deploy(uri=None, **_):
    """Deploy demoapp.

    Args:
        uri: The S3 URI for the application artifact to be deployed.

    Raises:
        ValueError: if no artifact URI is given.
    """
    # 'assert' is stripped when Python runs with -O; validate explicitly.
    if uri is None:
        raise ValueError("deploy requires the S3 artifact 'uri'")
    app_name = 'demoapp'
    config_dir = '{}/config'.format(get_app_basedir(app_name))
    release_dir = get_current_release_dir(app_name)
    execute(tasks.local_fetch_s3_artifact, uri)
    execute(tasks.deploy_artifact, app_name, uri)
    # link the shared config.yml into the freshly deployed release
    execute(tasks.create_symlink,
            '{}/config.yml'.format(config_dir),
            '{}/config.yml'.format(release_dir))
    execute(tasks.service_restart, app_name)
|
from django.contrib import admin
from core.models import *
|
import os
import subprocess

import file_io
def sample(avid_dir_list):
    """Extract frames from every .avi under the directories in *avid_dir_list*.

    Each video's frames are written as numbered JPEGs into a directory named
    after the video file (extension stripped).
    """
    avi_dir_list = file_io.get_dir_list(avid_dir_list)
    print(avi_dir_list)
    for avi_dir in avi_dir_list:
        avi_file_list = file_io.get_listfile(avi_dir, ".avi")
        avi_file_list.sort()  # deterministic processing order
        for avi in avi_file_list:
            image_dir = avi.replace(".avi", "")
            # ffmpeg does not create the output directory itself
            os.makedirs(image_dir, exist_ok=True)
            # argument list instead of a shell string: paths containing spaces
            # or shell metacharacters can no longer break (or inject into) the
            # command; failures are ignored, matching the old os.system call
            subprocess.run(["ffmpeg", "-i", avi, os.path.join(image_dir, "%06d.jpg")])
if __name__ == "__main__":
    # earlier relative data locations kept for reference
    #avid_dir_list = "../data/data_new/Training_Data"
    avid_dir_list = "/home/mscvadmin/Downloads/wd_new/Training_Data"
    #sample(avid_dir_list)
    #avid_dir_list = "../data/data_new/Testing_Data"
    # NOTE(review): only the testing set is sampled; the Training_Data
    # assignment above is immediately overwritten and never used.
    avid_dir_list = "/home/mscvadmin/Downloads/wd_new/Testing_Data"
    sample(avid_dir_list)
|
#! /usr/bin/env python3
from collections import Counter
from pathlib import Path
import numpy as np
from solutions.util.parse import readFileLines
# * INFO -- puzzle identification, used only for the banner output
AOC_YEAR = 2021
AOC_DAY = 4
AOC_PROBLEM = 1
# * UTIL
# directory containing this script; used to locate input.txt next to it
scriptpath = Path(__file__).parent.resolve()
def parseLine(line: str):
    """Parse one input line into a list of ints, or None for a blank line.

    Numbers may be separated by commas (the draw line) or by any run of
    whitespace (board rows, which are right-aligned with extra spaces).
    The old double-space replacement broke on three or more consecutive
    spaces; str.split() handles any amount of whitespace.
    """
    tokens = line.replace(',', ' ').split()
    if not tokens:
        return None  # blank separator line between boards
    return [int(n) for n in tokens]
def parseGame(lines: list) -> tuple:
    """Split the parsed input lines into the draw sequence and bingo boards.

    lines[0] holds the drawn numbers; the remaining entries are board rows
    separated by None entries (blank lines).  Returns (draws, boards) where
    boards is a numpy record array with 'number' and 'marked' fields.
    """
    draws = lines[0]
    boards = []
    # If the draws are immediately followed by a blank line, the first None
    # entry bumps us to board 0; otherwise start collecting into board 0 now.
    current_board = -1 if lines[1] is None else 0
    for line in lines[1:]:
        # Go to next board
        if line is None:
            current_board += 1
            continue
        # Init board if needed
        if current_board >= len(boards):
            boards.append([])
        # Add line to board (every number starts unmarked)
        boards[current_board].append([(n, False) for n in line])
    draws = np.array(draws)
    boards = np.rec.array(boards, dtype=[('number', 'i'), ('marked', 'bool')])
    return draws, boards
def checkBoards(boards):
    """Return (index, board) of the first board with a fully marked row or
    column, or (None, None) if no board has won yet."""
    for index, board in enumerate(boards):
        # Check rows
        for rowIndex in range(board.shape[0]):
            markedCount = Counter(board.marked[rowIndex])[True]
            if markedCount == board.shape[1]:
                return index, board
        # Check columns
        for columnIndex in range(board.shape[1]):
            markedCount = Counter(board.marked[:,columnIndex])[True]
            if markedCount == board.shape[0]:
                return index, board
    return None, None
# * MAIN
print(f'Advent of Code - {AOC_YEAR} - Day {AOC_DAY:02d} - Problem {AOC_PROBLEM:02d}')
print(f'{"="*50}\n')
# Get the lines
inputPath = scriptpath / Path('./input.txt')
inputLines = readFileLines(inputPath, parseLine)
print(f'There are {len(inputLines)} inputs\n')
# Parse into drawn numbers and boards
draws, boards = parseGame(inputLines)
print(f' -> We drew {draws.shape[0]} numbers')
print(f' -> We have {boards.shape[0]} boards')
lastDraw = None
winnerIndex = None
winnerBoard = None
for index, draw in enumerate(draws):
    # Save last draw
    lastDraw = draw
    # Mark numbers (on every board at once, via the boolean mask)
    boards.marked[boards.number == draw] = True
    # We can start winning when we have at least 5 draws. This is a small
    # optimization that becomes important with many boards to check
    if index > 4:
        # Check for winner
        winnerIndex, winnerBoard = checkBoards(boards)
        if winnerBoard is not None:
            break
if winnerBoard is None:
    print(' -> None of the boards won')
    exit()
# Puzzle score: sum of the winner's unmarked numbers times the last draw
boardScore = sum(winnerBoard.number[winnerBoard.marked == False])
result = boardScore * lastDraw
print(f' -> Board #{winnerIndex+1} (of {boards.shape[0]}) won\n')
print(f'score = {boardScore:6d}')
print(f'lastDraw = {lastDraw:6d}')
print(f'{"-"*40}')
print(f'{" "*7}* = {result:6d}')
|
import datetime
import secrets
from functools import wraps
import rethinkdb
from flask import request
from werkzeug.exceptions import NotFound, Unauthorized
# store and compare all token timestamps in UTC
TIME_ZONE_UTC = rethinkdb.make_timezone("+00:00")
# secrets.token_urlsafe(n) takes the number of random bytes, not characters
ACCESS_TOKEN_LENGTH = 32
ACCESS_TOKEN_EXPIRATION = datetime.timedelta(hours=1)
REFRESH_TOKEN_LENGTH = 64
class OAuthAccessToken:
    """Short-lived bearer token tied to a user id."""
    def __init__(self, user_id: str):
        """
        Generates an access token
        """
        self.user_id = user_id
        self.token = secrets.token_urlsafe(ACCESS_TOKEN_LENGTH)
        # expires one hour from creation (UTC)
        self.token_expiration = datetime.datetime.now(TIME_ZONE_UTC) + ACCESS_TOKEN_EXPIRATION
class OAuthRefreshToken:
    """Long-lived token used to obtain fresh access tokens (no expiry here)."""
    def __init__(self, user_id: str):
        """
        Generates a refresh token
        """
        self.user_id = user_id
        self.token = secrets.token_urlsafe(REFRESH_TOKEN_LENGTH)
def oauth(force):
    """
    Verifies that the API request is authenticated, and queries the authenticated user.
    This decorator wraps around REST method handlers (get, post, put, etc.)
    :param force: if True, the request will be aborted if authentication fails; if False, the request will continue.
    """
    def wrapper(f):
        @wraps(f)
        def inner(self, *args, **kwargs):
            self.authenticated = False
            self.user_data = None
            # check if the GET parameter (access_token) isn't supplied
            if "access_token" not in request.args:
                headers = dict(request.headers)
                if "Authorization" not in headers:
                    if force:
                        raise Unauthorized(description="This resource requires authentication.")
                    else:
                        return f(self, *args, **kwargs)
                auth_header = headers["Authorization"].split(" ", maxsplit=1)
                # '!=' rather than 'is not': identity comparison against an int
                # literal is implementation-defined (SyntaxWarning on 3.8+)
                if len(auth_header) != 2 or auth_header[0] != "Bearer":
                    if force:
                        raise Unauthorized(description="Invalid Authorization header.")
                    else:
                        return f(self, *args, **kwargs)
                bearer_token = auth_header[1]
            else:
                bearer_token = request.args.get("access_token", type=str)
            # Check if the token exists
            token_doc = self.db.get_doc("oauth_tokens", bearer_token)
            if not token_doc:
                if force:
                    raise Unauthorized(description="Invalid access token.")
                else:
                    return f(self, *args, **kwargs)
            # Check if the token is expired
            expiration: datetime.datetime = token_doc["expires"]
            if expiration < datetime.datetime.now(expiration.tzinfo):
                if force:
                    raise Unauthorized(
                        description="The provided access token is expired. "
                                    "Use the /auth/refresh route with a refresh token to get a new access token.")
                else:
                    return f(self, *args, **kwargs)
            user_id = token_doc["user_id"]
            self.user_data = self.db.get_doc("users", user_id)
            if not self.user_data:
                raise NotFound()
            # All is good
            self.authenticated = True
            return f(self, *args, **kwargs)
        return inner
    return wrapper
|
# Minimal placeholder script: announce itself on stdout.
greeting = "Example script!"
print(greeting)
|
import datetime
from django.core.management.base import BaseCommand
from record_form.models import PersonInfo
class Command(BaseCommand):
    help = """
    Saves personal information not updated in last 24 hours
    Use this command in a cron job
    to save older records
    you can test if the subcommand works by doing:
    python3 manage.py save_old
    """

    def handle(self, **options):
        """Re-save every PersonInfo record not updated within the last 24 hours."""
        now = datetime.datetime.now()
        yesterday = now - datetime.timedelta(1)
        old_info = PersonInfo.objects.filter(updated_on__lte=yesterday)
        # BUGFIX: QuerySet has no .save() — the original raised AttributeError.
        # Save each model instance individually (triggers per-instance save logic).
        for info in old_info:
            info.save()
|
# Scenario configuration for a Talos multicontact demo (non-flat platform).
# NOTE(review): TIMEOPT_CONFIG_FILE is assigned *before* the star-import below;
# if common_talos also defines that name, this value is overwritten — confirm intent.
TIMEOPT_CONFIG_FILE = "cfg_softConstraints_talos_kinConstraints.yaml"
from common_talos import *
# Demo script location and the environment/scene to load.
SCRIPT_PATH = "demos"
ENV_NAME = "multicontact/plateforme_not_flat"
# Phase durations, in seconds — presumably: init / single-support /
# double-support / triple-support / final connect-to-goal; TODO confirm.
DURATION_INIT = 4. # Time to init the motion
DURATION_SS =1.8
DURATION_DS = 0.3
DURATION_TS = 0.4
DURATION_CONNECT_GOAL = 2.
# Presumably the weight of the angular-momentum cost term — verify in solver config.
w_am = 0.5
EFF_T_PREDEF = 0.2
p_max = 0.13
# Vertical CoM reference shift (m), presumably applied over TIME_SHIFT_COM seconds.
COM_SHIFT_Z = -0.03
TIME_SHIFT_COM = 2.
GUIDE_STEP_SIZE = 0.4
from __future__ import print_function
import numpy as np
from sklearn.decomposition import NMF
from vectorize import X,F
model = NMF(6)
W = model.fit_transform(X)
H = model.components_
#print(W) # doc->topic
#print(H) # topic->word
for topic in H:
print()
for w,t in sorted(zip(topic,F),reverse=True):
if w<0.001: continue
print(t,w)
|
# dicom operations
import os
import os.path as op
import logging
from collections import OrderedDict
import tarfile
from heudiconv.external.pydicom import dcm
from .utils import SeqInfo, load_json, set_readonly
# Module-level logger for this dicom-handling module.
lgr = logging.getLogger(__name__)
def group_dicoms_into_seqinfos(files, file_filter, dcmfilter, grouping):
    """Process list of dicoms and return seqinfo and file group
    `seqinfo` contains per-sequence extract of fields from DICOMs which
    will be later provided into heuristics to decide on filenames
    Parameters
    ----------
    files : list of str
        List of files to consider
    file_filter : callable, optional
        Applied to each item of filenames. Should return True if file needs to be
        kept, False otherwise.
    dcmfilter : callable, optional
        If called on dcm_data and returns True, it is used to set series_id
    grouping : {'studyUID', 'accession_number', None}, optional
        what to group by: studyUID or accession_number
    Returns
    -------
    seqinfo : list of list
        `seqinfo` is a list of info entries per each sequence (some entry
        there defines a key for `filegrp`)
    filegrp : dict
        `filegrp` is a dictionary with files groupped per each sequence
    """
    allowed_groupings = ['studyUID', 'accession_number', None]
    if grouping not in allowed_groupings:
        raise ValueError('I do not know how to group by {0}'.format(grouping))
    per_studyUID = grouping == 'studyUID'
    per_accession_number = grouping == 'accession_number'
    lgr.info("Analyzing %d dicoms", len(files))
    # groups[0]: one series_id per file; groups[1]: index into mwgroup of the
    # representative wrapper each file was matched to.
    groups = [[], []]
    mwgroup = []
    studyUID = None
    # for sanity check that all DICOMs came from the same
    # "study". If not -- what is the use-case? (interrupted acquisition?)
    # and how would then we deal with series numbers
    # which would differ already
    if file_filter:
        nfl_before = len(files)
        files = list(filter(file_filter, files))
        nfl_after = len(files)
        lgr.info('Filtering out {0} dicoms based on their filename'.format(
            nfl_before-nfl_after))
    for fidx, filename in enumerate(files):
        # Deferred import: dcmstack pulls in heavy dependencies.
        from heudiconv.external.dcmstack import ds
        # TODO after getting a regression test check if the same behavior
        # with stop_before_pixels=True
        mw = ds.wrapper_from_data(dcm.read_file(filename, force=True))
        # Drop volatile fields from the series signature so grouping isn't
        # broken by per-file variation in these attributes.
        for sig in ('iop', 'ICE_Dims', 'SequenceName'):
            try:
                del mw.series_signature[sig]
            # NOTE(review): bare except — presumably only a missing key
            # (KeyError) is expected here; confirm before narrowing.
            except:
                pass
        try:
            file_studyUID = mw.dcm_data.StudyInstanceUID
        except AttributeError:
            lgr.info("File {} is missing any StudyInstanceUID".format(filename))
            file_studyUID = None
        try:
            # series_id: (SeriesNumber, ProtocolName); a negative SeriesNumber
            # marks the series as "unwanted" further below.
            series_id = (int(mw.dcm_data.SeriesNumber),
                         mw.dcm_data.ProtocolName)
            file_studyUID = mw.dcm_data.StudyInstanceUID
            if not per_studyUID:
                # verify that we are working with a single study
                if studyUID is None:
                    studyUID = file_studyUID
                elif not per_accession_number:
                    assert studyUID == file_studyUID, (
                        "Conflicting study identifiers found [{}, {}].".format(
                            studyUID, file_studyUID
                        ))
        except AttributeError as exc:
            lgr.warning('Ignoring %s since not quite a "normal" DICOM: %s',
                        filename, exc)
            series_id = (-1, 'none')
            file_studyUID = None
        if not series_id[0] < 0:
            if dcmfilter is not None and dcmfilter(mw.dcm_data):
                series_id = (-1, mw.dcm_data.ProtocolName)
        # filter out unwanted non-image-data DICOMs by assigning
        # a series number < 0 (see test below)
        if not series_id[0] < 0 and mw.dcm_data[0x0008, 0x0016].repval in (
                'Raw Data Storage',
                'GrayscaleSoftcopyPresentationStateStorage'):
            series_id = (-1, mw.dcm_data.ProtocolName)
        if per_studyUID:
            series_id = series_id + (file_studyUID,)
        # Try to attach this file to an already-seen series wrapper.
        ingrp = False
        for idx in range(len(mwgroup)):
            # same = mw.is_same_series(mwgroup[idx])
            if mw.is_same_series(mwgroup[idx]):
                # the same series should have the same study uuid
                assert (mwgroup[idx].dcm_data.get('StudyInstanceUID', None)
                        == file_studyUID)
                ingrp = True
                if series_id[0] >= 0:
                    # Adopt the representative's identifiers for consistency.
                    series_id = (mwgroup[idx].dcm_data.SeriesNumber,
                                 mwgroup[idx].dcm_data.ProtocolName)
                if per_studyUID:
                    series_id = series_id + (file_studyUID,)
                groups[0].append(series_id)
                groups[1].append(idx)
        if not ingrp:
            # First file of a new series: it becomes the representative.
            mwgroup.append(mw)
            groups[0].append(series_id)
            groups[1].append(len(mwgroup) - 1)
    group_map = dict(zip(groups[0], groups[1]))
    total = 0
    seqinfo = OrderedDict()
    # for the next line to make any sense the series_id needs to
    # be sortable in a way that preserves the series order
    for series_id, mwidx in sorted(group_map.items()):
        if series_id[0] < 0:
            # skip our fake series with unwanted files
            continue
        mw = mwgroup[mwidx]
        if mw.image_shape is None:
            # this whole thing has now image data (maybe just PSg DICOMs)
            # nothing to see here, just move on
            continue
        dcminfo = mw.dcm_data
        series_files = [files[i] for i, s in enumerate(groups[0])
                        if s == series_id]
        # turn the series_id into a human-readable string -- string is needed
        # for JSON storage later on
        if per_studyUID:
            studyUID = series_id[2]
            series_id = series_id[:2]
        accession_number = dcminfo.get('AccessionNumber')
        series_id = '-'.join(map(str, series_id))
        # size: image dimensions plus number of files; padded to length 4.
        size = list(mw.image_shape) + [len(series_files)]
        total += size[-1]
        if len(size) < 4:
            size.append(1)
        # MG - refactor into util function
        try:
            # RepetitionTime arrives in ms; SeqInfo stores seconds.
            TR = float(dcminfo.RepetitionTime) / 1000.
        except (AttributeError, ValueError):
            TR = -1
        try:
            TE = float(dcminfo.EchoTime)
        except (AttributeError, ValueError):
            TE = -1
        try:
            refphys = str(dcminfo.ReferringPhysicianName)
        except AttributeError:
            refphys = ''
        try:
            image_type = tuple(dcminfo.ImageType)
        except AttributeError:
            image_type = ''
        # NOTE(review): series_desc is assigned but never used below; SeqInfo
        # re-reads SeriesDescription via dcminfo.get — possibly dead code.
        try:
            series_desc = dcminfo.SeriesDescription
        except AttributeError:
            series_desc = ''
        motion_corrected = 'MOCO' in image_type
        if dcminfo.get([0x18,0x24], None):
            # GE and Philips scanners
            sequence_name = dcminfo[0x18,0x24].value
        elif dcminfo.get([0x19, 0x109c], None):
            # Siemens scanners
            sequence_name = dcminfo[0x19, 0x109c].value
        else:
            sequence_name = 'Not found'
        info = SeqInfo(
            total,
            op.split(series_files[0])[1],
            series_id,
            op.basename(op.dirname(series_files[0])),
            '-', '-',
            size[0], size[1], size[2], size[3],
            TR, TE,
            dcminfo.ProtocolName,
            motion_corrected,
            'derived' in [x.lower() for x in dcminfo.get('ImageType', [])],
            dcminfo.get('PatientID'),
            dcminfo.get('StudyDescription'),
            refphys,
            dcminfo.get('SeriesDescription'),
            sequence_name,
            image_type,
            accession_number,
            # For demographics to populate BIDS participants.tsv
            dcminfo.get('PatientAge'),
            dcminfo.get('PatientSex'),
            dcminfo.get('AcquisitionDate'),
            dcminfo.get('SeriesInstanceUID')
        )
        # candidates
        # dcminfo.AccessionNumber
        #   len(dcminfo.ReferencedImageSequence)
        #   len(dcminfo.SourceImageSequence)
        # FOR demographics
        if per_studyUID:
            key = studyUID.split('.')[-1]
        elif per_accession_number:
            key = accession_number
        else:
            key = ''
        lgr.debug("%30s %30s %27s %27s %5s nref=%-2d nsrc=%-2d %s" % (
            key,
            info.series_id,
            dcminfo.SeriesDescription,
            dcminfo.ProtocolName,
            info.is_derived,
            len(dcminfo.get('ReferencedImageSequence', '')),
            len(dcminfo.get('SourceImageSequence', '')),
            info.image_type
        ))
        # Nest results by the chosen grouping key (or keep flat when None).
        if per_studyUID:
            if studyUID not in seqinfo:
                seqinfo[studyUID] = OrderedDict()
            seqinfo[studyUID][info] = series_files
        elif per_accession_number:
            if accession_number not in seqinfo:
                seqinfo[accession_number] = OrderedDict()
            seqinfo[accession_number][info] = series_files
        else:
            seqinfo[info] = series_files
    if per_studyUID:
        lgr.info("Generated sequence info for %d studies with %d entries total",
                 len(seqinfo), sum(map(len, seqinfo.values())))
    elif per_accession_number:
        lgr.info("Generated sequence info for %d accession numbers with %d "
                 "entries total", len(seqinfo), sum(map(len, seqinfo.values())))
    else:
        lgr.info("Generated sequence info with %d entries", len(seqinfo))
    return seqinfo
def get_dicom_series_time(dicom_list):
    """Return the series acquisition time as seconds since the epoch.

    Reads SeriesDate (YYYYMMDD) and SeriesTime (HHMMSS.MICROSEC) from the
    first file only. Primarily to be used for reproducible time stamping.
    """
    import calendar
    import time
    header = dcm.read_file(dicom_list[0], stop_before_pixels=True, force=True)
    # Drop fractional seconds and fuse date+time into YYYYMMDDHHMMSS.
    stamp = header.SeriesDate + header.SeriesTime.split('.', 1)[0]
    # Interpret the stamp as UTC and convert to epoch seconds.
    return calendar.timegm(time.strptime(stamp, '%Y%m%d%H%M%S'))
def compress_dicoms(dicom_list, out_prefix, tempdirs, overwrite):
    """Archives DICOMs into a tarball
    Also tries to do it reproducibly, so takes the date for files
    and target tarball based on the series time (within the first file)
    Parameters
    ----------
    dicom_list : list of str
        list of dicom files
    out_prefix : str
        output path prefix, including the portion of the output file name
        before .dicom.tgz suffix
    tempdirs : object
        TempDirs object to handle multiple tmpdirs
    overwrite : bool
        Overwrite existing tarfiles
    Returns
    -------
    filename : str
        Result tarball
    """
    tmpdir = tempdirs(prefix='dicomtar')
    outtar = out_prefix + '.dicom.tgz'
    if op.exists(outtar) and not overwrite:
        lgr.info("File {} already exists, will not overwrite".format(outtar))
        # NOTE(review): returns None here rather than the existing path —
        # callers must tolerate a None result; confirm this is intended.
        return
    # tarfile encodes current time.time inside making those non-reproducible
    # so we should choose which date to use.
    # Solution from DataLad although ugly enough:
    # Sorting gives a deterministic member order for reproducibility.
    dicom_list = sorted(dicom_list)
    dcm_time = get_dicom_series_time(dicom_list)
    def _assign_dicom_time(ti):
        # Reset the date to match the one of the last commit, not from the
        # filesystem since git doesn't track those at all
        ti.mtime = dcm_time
        return ti
    # poor man mocking since can't rely on having mock
    # time.time is monkeypatched so tarfile's own timestamps are reproducible;
    # restored unconditionally in the finally block.
    try:
        import time
        _old_time = time.time
        time.time = lambda: dcm_time
        if op.lexists(outtar):
            os.unlink(outtar)
        with tarfile.open(outtar, 'w:gz', dereference=True) as tar:
            for filename in dicom_list:
                # Stage each file as a symlink in tmpdir so the archive member
                # names are controlled, while dereference=True stores content.
                outfile = op.join(tmpdir, op.basename(filename))
                if not op.islink(outfile):
                    os.symlink(op.realpath(filename), outfile)
                # place into archive stripping any lead directories and
                # adding the one corresponding to prefix
                tar.add(outfile,
                        arcname=op.join(op.basename(out_prefix),
                                        op.basename(outfile)),
                        recursive=False,
                        filter=_assign_dicom_time)
    finally:
        time.time = _old_time
        tempdirs.rmtree(tmpdir)
    return outtar
def embed_nifti(dcmfiles, niftifile, infofile, bids_info, force, min_meta):
    """
    If `niftifile` doesn't exist, it gets created out of the `dcmfiles` stack,
    and json representation of its meta_ext is returned (bug since should return
    both niftifile and infofile?)
    if `niftifile` exists, its affine's orientation information is used while
    establishing new `NiftiImage` out of dicom stack and together with `bids_info`
    (if provided) is dumped into json `infofile`
    Parameters
    ----------
    dcmfiles
        stack of DICOM files to convert
    niftifile
        path of the (possibly pre-existing) NIfTI file
    infofile
        output path for the JSON sidecar
    bids_info
        dict of BIDS fields merged over the dcmstack metadata (wins on conflict)
    force
        passed to dcmstack.parse_and_stack
    min_meta
        if True, skip dcmstack metadata extraction entirely
    Returns
    -------
    niftifile, infofile
    """
    # imports for nipype (this function is shipped to a nipype Node, so all
    # imports must live inside the function body)
    import nibabel as nb
    import os
    import os.path as op
    import json
    import re
    if not min_meta:
        import dcmstack as ds
        stack = ds.parse_and_stack(dcmfiles, force=force).values()
        if len(stack) > 1:
            raise ValueError('Found multiple series')
        # stack is a dict view; indexing does not work in Python 3, so take
        # the single element via an iterator.
        stack = next(iter(stack))
        # Create the nifti image using the data array
        if not op.exists(niftifile):
            nifti_image = stack.to_nifti(embed_meta=True)
            nifti_image.to_filename(niftifile)
            return ds.NiftiWrapper(nifti_image).meta_ext.to_json()
        # Reuse the existing file's axis orientation for the rebuilt image.
        orig_nii = nb.load(niftifile)
        aff = orig_nii.affine
        ornt = nb.orientations.io_orientation(aff)
        axcodes = nb.orientations.ornt2axcodes(ornt)
        new_nii = stack.to_nifti(voxel_order=''.join(axcodes), embed_meta=True)
        meta = ds.NiftiWrapper(new_nii).meta_ext.to_json()
    meta_info = None if min_meta else json.loads(meta)
    if bids_info:
        if min_meta:
            meta_info = bids_info
        else:
            # Copy first so the parsed metadata is not mutated in place;
            # BIDS fields take precedence over dcmstack ones.
            meta_info = meta_info.copy()
            meta_info.update(bids_info)
        try:
            # BUGFIX: the regex is now a raw string — '\w' in a plain string
            # is an invalid escape sequence (warning on modern Pythons).
            meta_info['TaskName'] = (re.search(r'(?<=_task-)\w+',
                                               op.basename(infofile))
                                     .group(0).split('_')[0])
        except AttributeError:
            # No _task- entity in the filename: simply skip TaskName.
            pass
    # write to outfile
    with open(infofile, 'wt') as fp:
        json.dump(meta_info, fp, indent=3, sort_keys=True)
    return niftifile, infofile
def embed_metadata_from_dicoms(bids, item_dicoms, outname, outname_bids,
                               prov_file, scaninfo, tempdirs, with_prov,
                               min_meta):
    """
    Enhance sidecar information file with more information from DICOMs

    Runs ``embed_nifti`` through a nipype Node so its execution is isolated
    in a temporary working directory.

    Parameters
    ----------
    bids : if truthy, merge the existing BIDS sidecar (outname_bids) in
    item_dicoms : DICOM paths to harvest metadata from
    outname : path of the converted NIfTI file
    outname_bids : path of the existing BIDS sidecar JSON (read when `bids`)
    prov_file : output path for serialized provenance (when `with_prov`)
    scaninfo : sidecar JSON file to (re)write
    tempdirs : TempDirs-like factory for temporary directories
    with_prov : if truthy, serialize nipype provenance to `prov_file`
    min_meta : forwarded to embed_nifti; skips dcmstack extraction
    Returns
    -------
    """
    from nipype import Node, Function
    tmpdir = tempdirs(prefix='embedmeta')
    # We need to assure that paths are absolute if they are relative
    item_dicoms = list(map(op.abspath, item_dicoms))
    embedfunc = Node(Function(input_names=['dcmfiles', 'niftifile', 'infofile',
                                           'bids_info', 'force', 'min_meta'],
                              output_names=['outfile', 'meta'],
                              function=embed_nifti),
                     name='embedder')
    embedfunc.inputs.dcmfiles = item_dicoms
    embedfunc.inputs.niftifile = op.abspath(outname)
    embedfunc.inputs.infofile = op.abspath(scaninfo)
    embedfunc.inputs.min_meta = min_meta
    if bids:
        embedfunc.inputs.bids_info = load_json(op.abspath(outname_bids))
    else:
        embedfunc.inputs.bids_info = None
    embedfunc.inputs.force = True
    embedfunc.base_dir = tmpdir
    # Remember cwd: nipype may chdir during run(); restored on failure below.
    cwd = os.getcwd()
    lgr.debug("Embedding into %s based on dicoms[0]=%s for nifti %s",
              scaninfo, item_dicoms[0], outname)
    try:
        if op.lexists(scaninfo):
            # TODO: handle annexed file case
            if not op.islink(scaninfo):
                # Sidecar may be read-only from a previous run; unlock it.
                set_readonly(scaninfo, False)
        res = embedfunc.run()
        set_readonly(scaninfo)
        if with_prov:
            g = res.provenance.rdf()
            g.parse(prov_file,
                    format='turtle')
            g.serialize(prov_file, format='turtle')
            set_readonly(prov_file)
    except Exception as exc:
        # Best-effort: log and restore the working directory, don't re-raise.
        lgr.error("Embedding failed: %s", str(exc))
        os.chdir(cwd)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/main_window.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated UI scaffold for the main window (do not hand-edit:
    regenerate from ui/main_window.ui instead)."""

    def setupUi(self, MainWindow):
        """Build widgets, menus, actions and wire basic signals on MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(792, 547)
        # Central area: a single CartPoleWidget inside a horizontal layout.
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.cartPoleWidget = CartPoleWidget(self.centralwidget)
        self.cartPoleWidget.setObjectName("cartPoleWidget")
        self.horizontalLayout.addWidget(self.cartPoleWidget)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar: File (with Recent files submenu), View, Help.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 792, 22))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuRecent_files = QtWidgets.QMenu(self.menuFile)
        self.menuRecent_files.setObjectName("menuRecent_files")
        self.menu_View = QtWidgets.QMenu(self.menubar)
        self.menu_View.setObjectName("menu_View")
        self.menuHelp = QtWidgets.QMenu(self.menubar)
        self.menuHelp.setObjectName("menuHelp")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Actions with their icons from the resource file (resources_rc).
        self.actionQuit = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/icons/icons/quit.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionQuit.setIcon(icon)
        self.actionQuit.setObjectName("actionQuit")
        self.actionOpen = QtWidgets.QAction(MainWindow)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(":/icons/icons/open.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionOpen.setIcon(icon1)
        self.actionOpen.setObjectName("actionOpen")
        self.actionSave = QtWidgets.QAction(MainWindow)
        self.actionSave.setEnabled(False)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(":/icons/icons/save.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionSave.setIcon(icon2)
        self.actionSave.setObjectName("actionSave")
        self.actionSave_as = QtWidgets.QAction(MainWindow)
        self.actionSave_as.setObjectName("actionSave_as")
        self.actionAbout = QtWidgets.QAction(MainWindow)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(":/icons/icons/about.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionAbout.setIcon(icon3)
        self.actionAbout.setObjectName("actionAbout")
        self.actionClearRecentFiles = QtWidgets.QAction(MainWindow)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(":/icons/icons/clear.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionClearRecentFiles.setIcon(icon4)
        self.actionClearRecentFiles.setObjectName("actionClearRecentFiles")
        # Assemble menus.
        self.menuRecent_files.addAction(self.actionClearRecentFiles)
        self.menuRecent_files.addSeparator()
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionOpen)
        self.menuFile.addAction(self.menuRecent_files.menuAction())
        self.menuFile.addAction(self.actionSave)
        self.menuFile.addAction(self.actionSave_as)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionQuit)
        self.menuHelp.addAction(self.actionAbout)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menu_View.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())
        self.retranslateUi(MainWindow)
        self.actionQuit.triggered.connect(MainWindow.close)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply (translatable) titles, labels and shortcuts to all widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.menuFile.setTitle(_translate("MainWindow", "&File"))
        self.menuRecent_files.setTitle(_translate("MainWindow", "Recent files"))
        self.menu_View.setTitle(_translate("MainWindow", "&View"))
        self.menuHelp.setTitle(_translate("MainWindow", "&?"))
        self.actionQuit.setText(_translate("MainWindow", "&Quit"))
        self.actionQuit.setShortcut(_translate("MainWindow", "Ctrl+Q"))
        self.actionOpen.setText(_translate("MainWindow", "Open..."))
        self.actionOpen.setShortcut(_translate("MainWindow", "Ctrl+O"))
        self.actionSave.setText(_translate("MainWindow", "Save"))
        self.actionSave.setShortcut(_translate("MainWindow", "Ctrl+S"))
        self.actionSave_as.setText(_translate("MainWindow", "Save as..."))
        self.actionAbout.setText(_translate("MainWindow", "&About..."))
        self.actionAbout.setShortcut(_translate("MainWindow", "F1"))
        self.actionClearRecentFiles.setText(_translate("MainWindow", "Clear"))
from src.widgets.cart_pole_widget import CartPoleWidget
import resources_rc
|
from pyflink.table import EnvironmentSettings, BatchTableEnvironment
from customer_denorm import CustomerDenorm

# Build a batch-mode Flink table environment on the Blink planner.
# ------------------------------------------------------------
env_settings = (EnvironmentSettings
                .new_instance()
                .in_batch_mode()
                .use_blink_planner()
                .build())
table_env = BatchTableEnvironment.create(environment_settings=env_settings)

# Wire the denormalization job up to this environment.
# ------------------------------------------------------------
cust_denorm = CustomerDenorm(table_env)

# Kick off the ETL; report any failure on stdout without crashing.
# ------------------------------------------------------------
try:
    cust_denorm.process_job()
except Exception as e:
    print(e)
|
from typing import Counter, Dict, List, Set
from aocd import lines
# Adjacency map: cave name -> set of directly connected cave names.
CaveSystem = Dict[str, Set[str]]
def parse(input: List[str] = lines):
    """Build an undirected adjacency map from "a-b" edge lines.

    NOTE: ``input`` shadows the builtin and its default (`lines` from aocd)
    is evaluated at import time; both kept for interface compatibility.
    """
    system: CaveSystem = {}
    for line in input:
        # Each line is one undirected edge; register it in both directions.
        # (setdefault replaces the original's per-edge set-union rebuild.)
        a, b = line.split("-")
        system.setdefault(a, set()).add(b)
        system.setdefault(b, set()).add(a)
    return system
def part1(system: CaveSystem = parse()):
    """Count distinct start->end paths visiting each small cave at most once.

    Breadth-first expansion over partial paths; big (uppercase) caves may be
    revisited freely.
    """
    paths: List[List[str]] = [["start"]]
    result = 0
    while len(paths) > 0:
        new_paths: List[List[str]] = []
        for path in paths:
            for c in system[path[-1]]:
                if c == "start":
                    continue
                elif c == "end":
                    result += 1
                # BUGFIX: dropped the original's stray `or "end" in c` — the
                # exact cave "end" is already handled above, so that clause
                # only mis-fired for caves merely *containing* "end",
                # wrongly granting them unlimited revisits.
                elif c.isupper() or c not in path:
                    new_paths.append(path + [c])
        paths = new_paths
    return result
def part2(system: CaveSystem = parse(), p: List[str] = None):
    """Count start->end paths where one small cave may be visited twice.

    Recursive depth-first count starting from partial path *p*.
    BUGFIX: the mutable default ``p=["start"]`` is replaced by a None
    sentinel (same behavior, no shared-state hazard), and the comprehension
    no longer shadows the loop variable ``c`` three times.
    """
    if p is None:
        p = ["start"]
    result = 0
    # True while no small cave appears twice on the path so far — in that
    # case one more repeat visit of a small cave is still allowed.
    small_visits = Counter(cave for cave in p if cave.islower())
    may_repeat = all(count <= 1 for count in small_visits.values())
    for c in system[p[-1]]:
        if c == "start":
            continue
        elif c == "end":
            result += 1
        elif c.isupper() or c not in p or may_repeat:
            result += part2(system, p + [c])
    return result
if __name__ == "__main__":
    # Solve both parts against the puzzle input fetched by aocd at import.
    print(f"Part 1: {part1()}")
    print(f"Part 2: {part2()}")
|
#!/usr/local/bin/python3.6
"""
Create CSVs from healthcare.gov data extracted from files or from the website directly.
The data is filtered by plan_ids for a given state and by provider address to include
only in-state (or neighboring state) addresses.
The CSVs are output into data/healthcare_gov/[issuer_id].csv
This script will also echo plan statistics (e.g., the number of providers per plan).
"""
import argparse
import ijson
import json
import logging
import os
import urllib
import pandas as pd
from lib import etl_helper
# Emit INFO-level progress messages from this script's many logging calls.
logging.basicConfig(level=logging.INFO)
def normalize_provider(provider, plan_ids_set):
    """Flatten one provider JSON blob into per-(plan, address, specialty) rows.

    Only plans whose ``plan_id`` is in *plan_ids_set* are kept. NOTE: pops
    'addresses', 'plans', 'specialty' and 'name' off *provider* (mutates the
    argument); the remaining keys are copied into every row.
    """
    addresses = provider.pop('addresses')
    plans = provider.pop('plans')
    specialties = provider.pop('specialty')  # this is a list
    name = provider.pop('name')
    rows = []
    for plan in plans:
        if plan['plan_id'] not in plan_ids_set:
            continue
        for address in addresses:
            for specialty in specialties:
                # Later updates win on key conflicts, mirroring the original
                # {**address, **plan, **name, **provider} merge order.
                row = dict(address)
                row.update(plan)
                row.update(name)
                row.update(provider)
                row['specialty'] = specialty
                rows.append(row)
    return rows
def filter_by_states(providers, states):
    """Keep only providers whose 'state' field is one of *states*.

    Used to cut down the data volume before DataFrame construction.
    """
    kept = []
    for provider in providers:
        if provider['state'] in states:
            kept.append(provider)
    return kept
def _main(**kwargs):
    """Manually kickoff the JSON to CSV file transformation for a given state.

    Expects kwargs: 'state' (str), 'from_file' (bool), and optionally
    'neighboring_states' (list of str). Writes one CSV per plan under
    HEALTHCARE_GOV_PATH/<state>/ and echoes per-plan provider counts.
    """
    state = kwargs['state']
    # Incorporate neighboring states if present.
    states = [state]
    if kwargs['neighboring_states']:
        states.extend(kwargs['neighboring_states'])
    states = set(states)
    logging.info('Starting translation process for {}.'.format(state))
    plans = etl_helper.extract_plans(state)
    logging.info('There are {} plans listed in {}.'.format(len(plans), state))
    logging.info('The plan IDs are: {}'.format([plan[0] for plan in plans]))
    # TODO: Use Plan Attributes file to automatically exclude all dental plans.
    # Exclude dentists if the user wants.
    dental_plan_urls = [plan[1] for plan in plans if 'dent' in plan[1].lower()]
    if dental_plan_urls and etl_helper.query_yes_no(
            message='Would you like to exclude these {} dental plans? {}'.format(
                len(dental_plan_urls), dental_plan_urls)):
        plans = [plan for plan in plans if not plan[1] in dental_plan_urls]
    # Same opt-out for Humana plans.
    humana_plan_urls = [plan[1] for plan in plans if 'humana' in plan[1].lower()]
    if humana_plan_urls and etl_helper.query_yes_no(
            message='Would you like to exclude these {} Humana plans? {}'.format(
                len(humana_plan_urls), humana_plan_urls)):
        plans = [plan for plan in plans if not plan[1] in humana_plan_urls]
    logging.info('{} plans in {} remain.'.format(len(plans), state))
    logging.info('The plan IDs are: {} .'.format([plan[0] for plan in plans]))
    count_by_plan = {}
    # TODO: Write an extract_plan function.
    for idx, (issuer_id, plan_url) in enumerate(plans):
        logging.info('Processing plan {} at url {} .'.format(issuer_id, plan_url))
        logging.info('This is plan number {} of {}.'.format(idx + 1, len(plans)))
        plan_ids_set = etl_helper.get_issuer_plan_ids(issuer_id)
        output_path = etl_helper.HEALTHCARE_GOV_PATH + '/{}/{}.csv'.format(
            state, etl_helper.clean_plan_name(issuer_id))
        # Resumable: skip plans whose CSV was already produced.
        if os.path.exists(output_path):
            logging.info('CSV {} already exists. Moving on...'.format(output_path))
            continue
        try:
            provider_urls = etl_helper.fetch_provider_urls(plan_url)
        except Exception:
            # BUGFIX: the message had one placeholder but two format args,
            # silently dropping plan_url from the log line.
            logging.error(
                'Error fetching provider urls for {} at {}. Moving on...'.format(
                    issuer_id, plan_url))
            continue
        logging.info('There are {} provider urls for this plan.'.format(len(provider_urls)))
        # Exclude pharmacists and facilities if the user wants.
        pharma_urls = [url for url in provider_urls if 'pharma' in url.lower()]
        if pharma_urls and etl_helper.query_yes_no(
                message='Would you like to exclude these pharma urls? {}'.format(pharma_urls)):
            provider_urls = [url for url in provider_urls if url not in pharma_urls]
        providers = []
        for url_idx, url in enumerate(provider_urls):
            logging.info('Processing {}.'.format(url))
            logging.info('Processing url number {} of {}.'.format(url_idx + 1, len(provider_urls)))
            try:
                if kwargs['from_file']:
                    target_path = etl_helper.HEALTHCARE_GOV_PATH + '/{}/{}/{}.json'.format(
                        state, etl_helper.clean_plan_name(issuer_id), etl_helper.clean_paths(url)
                    )
                    if not os.path.exists(target_path):
                        logging.warning("Filepath {} doesn't exist. Skipping.".format(target_path))
                        continue
                    _file = open(target_path, 'r', encoding='latin-1')
                else:
                    _file = urllib.request.urlopen(url)
                # Stream-parse the (potentially huge) JSON array item by item.
                objects = ijson.items(_file, 'item')
                for provider in objects:
                    if provider['type'] != 'INDIVIDUAL':
                        continue
                    normalized_providers = normalize_provider(provider, plan_ids_set)
                    filtered_by_state = filter_by_states(normalized_providers, states)
                    providers.extend(filtered_by_state)
                logging.info('{} successfully loaded.'.format(url))
                if kwargs['from_file']:
                    _file.close()
            except Exception:
                # Best effort per url: record the failure and keep going.
                logging.exception('Error loading {}. Continuing...'.format(url))
                continue
        logging.info('{} providers loaded for {}.'.format(len(providers), issuer_id))
        # Load data into a DataFrame then export as CSV.
        try:
            individuals = pd.DataFrame(providers)
            print(individuals.head())
            individuals.rename(
                columns={'first': 'first name', 'last': 'last name', 'middle': 'middle name'},
                inplace=True
            )
            individuals.columns = [col.title() for col in individuals.columns]
            individuals.to_csv(output_path, index=False)
            logging.info('CSV for {} with {} rows written to {}.'.format(
                issuer_id, individuals.shape[0], output_path))
            count_by_plan[issuer_id] = individuals.shape[0]
        except AttributeError:
            logging.exception('Something has gone wrong with loading data into the DataFrame.')
            logging.info('Sample provider causing an error: {}.'.format(providers[0]))
    logging.info('Final tally of processed providers: {}.'.format(count_by_plan))
def _get_arguments():
"""Build argument parser."""
parser = argparse.ArgumentParser(description='''
This script starts the process of converting JSON plan data to CSVs.'
''')
parser.add_argument(
'-s', '--state',
help='State to extract data from.',
required=True,
type=str
)
parser.add_argument(
'-ff', '--from_file',
help='Flag for reading from filesystem instead of url.',
action='store_true'
)
parser.add_argument(
'-ns', '--neighboring_states',
help='List of neighboring states to save data for.',
required=False,
nargs='+',
type=str
)
args = parser.parse_args()
return args.__dict__
if __name__ == '__main__':
    # Parse CLI flags and run the extraction for the requested state.
    _main(**_get_arguments())
|
# coding: utf-8
# Created on: 15.01.2018
# Author: Roman Miroshnychenko aka Roman V.M. (roman1972@gmail.com)
import scrapy
class VideosSpider(scrapy.Spider):
    """Parse videos from vidsplay.com

    Crawl flow: index page -> first 3 category pages -> first 3 videos per
    category; each video yields a dict item with category/title/thumbnail/url.
    """
    name = 'videos'
    start_url = 'https://www.vidsplay.com'

    def start_requests(self):
        """Entry point for our spider"""
        yield scrapy.Request(self.start_url, callback=self.parse)

    def parse(self, response):
        """Parse vidsplay.com index page"""
        # Absolute XPaths are brittle against site redesigns — accepted here.
        category_urls = response.xpath(
            '/html/body/div[1]/div/div/div/aside/section[3]/div/ul/li/a/@href'
        ).extract()
        for url in category_urls[:3]:  # We want to be nice and scrape only 3 items
            yield response.follow(url, callback=self.parse_category)

    def parse_category(self, response):
        """Parse a video category page"""
        base_selector = response.xpath(
            '/html/body/div[1]/div/div/div/div/main/article'
        )
        category = base_selector.xpath(
            './header/h1/text()'
        ).extract_first()
        video_selectors = base_selector.xpath(
            './div/div[1]/div/div/div/div[@class="pt-cv-ifield"]'
        )
        for selector in video_selectors[:3]:  # We want to be nice and scrape only 3 items
            url = selector.xpath('./p/a/@href').extract_first()
            # ``meta`` argument can be used to pass data to downstream spider callbacks
            yield response.follow(url,
                                  callback=self.parse_video,
                                  meta={'category': category})

    def parse_video(self, response):
        """Parse a video details page"""
        base_selector = response.xpath(
            '/html/body/div[1]/div/div/div/div/main/article/div'
        )
        title = base_selector.xpath(
            './header/h1/text()'
        ).extract_first()
        # Thumbnail and stream URL live in itemprop meta tags.
        thumbnail = base_selector.xpath(
            './div/div[2]/div[1]/meta[@itemprop="thumbnailUrl"]/@content'
        ).extract_first()
        url = base_selector.xpath(
            './div/div[2]/div[1]/meta[@itemprop="contentURL"]/@content'
        ).extract_first()
        yield {
            'category': response.meta['category'],
            'title': title,
            'thumbnail': thumbnail,
            'url': url
        }
|
#!/usr/bin/env python3
"""
Tests for the inner product Tensorflow operation.
.. moduleauthor:: David Stutz
"""
import unittest
import numpy as np
import tensorflow as tf
import _inner_product_grad
# Load the custom op's shared library; libinner_product.so must be on the
# dynamic loader path for these tests to run.
inner_product_module = tf.load_op_library('libinner_product.so')
class InnerProductOpTest(unittest.TestCase):
def test_raisesExceptionWithIncompatibleDimensions(self):
with tf.Session(''):
with self.assertRaises(ValueError):
inner_product_module.inner_product([1, 2], [[1, 2], [3, 4]]).eval()
with self.assertRaises(ValueError):
self.assertRaises(inner_product_module.inner_product([1, 2], [1, 2, 3, 4]).eval(), ValueError)
with self.assertRaises(ValueError):
self.assertRaises(inner_product_module.inner_product([1, 2, 3], [[1, 2], [3, 4]]).eval(), ValueError)
def test_innerProductHardCoded(self):
with tf.Session(''):
result = inner_product_module.inner_product([[1], [2]], [[1, 2], [3, 4]]).eval()
self.assertEqual(result.shape[0], 2)
self.assertEqual(result[0], 5)
self.assertEqual(result[1], 11)
    def test_innerProductGradientXHardCoded(self):
        """Gradient w.r.t. x of the custom op matches tf.matmul's gradient."""
        with tf.Session('') as sess:
            x = tf.placeholder(tf.float32, shape = (2))
            W = tf.constant(np.asarray([[1, 2], [3, 4]]).astype(np.float32))
            # Reference graph (tf.matmul) vs custom-op graph on the same inputs.
            Wx_tf = tf.matmul(W, tf.reshape(x, [-1, 1]))
            Wx_inner_product = inner_product_module.inner_product(tf.reshape(x, [-1, 1]), W)
            grad_x_tf = tf.gradients(Wx_tf, x)
            grad_x_inner_product = tf.gradients(Wx_inner_product, x)
            # Evaluate both gradients at x = [1, 2] and compare element-wise.
            gradient_tf = sess.run(grad_x_tf, feed_dict = {x: np.asarray([1, 2]).astype(np.float32)})
            gradient_inner_product = sess.run(grad_x_inner_product, feed_dict = {x: np.asarray([1, 2]).astype(np.float32)})
            self.assertEqual(gradient_tf[0][0], gradient_inner_product[0][0])
            self.assertEqual(gradient_tf[0][1], gradient_inner_product[0][1])
def test_innerProductGradientWHardCoded(self):
with tf.Session('') as sess:
x = tf.constant(np.asarray([1, 2]).astype(np.float32))
W = tf.placeholder(tf.float32, shape = (2, 2))
Wx_tf = tf.matmul(W, tf.reshape(x, [-1, 1]))
Wx_inner_product = inner_product_module.inner_product(tf.reshape(x, [-1, 1]), W)
grad_W_tf = tf.gradients(Wx_tf, W)
grad_W_inner_product = tf.gradients(Wx_inner_product, W)
gradient_tf = sess.run(grad_W_tf, feed_dict = {W: np.asarray([[1, 2], [3, 4]]).astype(np.float32)})
gradient_inner_product = sess.run(grad_W_inner_product, feed_dict = {W: np.asarray([[1, 2], [3, 4]]).astype(np.float32)})
self.assertEqual(gradient_tf[0][0][0], gradient_inner_product[0][0][0])
self.assertEqual(gradient_tf[0][0][1], gradient_inner_product[0][0][1])
self.assertEqual(gradient_tf[0][1][0], gradient_inner_product[0][1][0])
self.assertEqual(gradient_tf[0][1][1], gradient_inner_product[0][1][1])
def test_innerProductRandom(self):
with tf.Session(''):
n = 4
m = 5
for i in range(100):
x_rand = np.random.randint(10, size = (n, 1))
W_rand = np.random.randint(10, size = (m, n))
result_rand = np.dot(W_rand, x_rand)
result = inner_product_module.inner_product(x_rand, W_rand).eval()
np.testing.assert_array_equal(result, result_rand)
def test_innerProductGradientXRandom(self):
with tf.Session('') as sess:
n = 4
m = 5
x = tf.placeholder(tf.float32, shape = (n))
W = tf.placeholder(tf.float32, shape = (m, n))
Wx_tf = tf.matmul(W, tf.reshape(x, [-1, 1]))
Wx_inner_product = inner_product_module.inner_product(tf.reshape(x, [-1, 1]), W)
grad_x_tf = tf.gradients(Wx_tf, x)
grad_x_inner_product = tf.gradients(Wx_inner_product, x)
for i in range(100):
x_rand = np.random.randint(10, size = (n))
W_rand = np.random.randint(10, size = (m, n))
gradient_tf = sess.run(grad_x_tf, feed_dict = {x: x_rand, W: W_rand})
gradient_inner_product = sess.run(grad_x_inner_product, feed_dict = {x: x_rand, W: W_rand})
np.testing.assert_array_equal(gradient_tf, gradient_inner_product)
def test_innerProductGradientWRandom(self):
with tf.Session('') as sess:
n = 4
m = 5
x = tf.placeholder(tf.float32, shape = (n))
W = tf.placeholder(tf.float32, shape = (m, n))
Wx_tf = tf.matmul(W, tf.reshape(x, [-1, 1]))
Wx_inner_product = inner_product_module.inner_product(tf.reshape(x, [-1, 1]), W)
grad_W_tf = tf.gradients(Wx_tf, W)
grad_W_inner_product = tf.gradients(Wx_inner_product, W)
for i in range(100):
x_rand = np.random.randint(10, size = (n))
W_rand = np.random.randint(10, size = (m, n))
gradient_tf = sess.run(grad_W_tf, feed_dict = {x: x_rand, W: W_rand})
gradient_inner_product = sess.run(grad_W_inner_product, feed_dict = {x: x_rand, W: W_rand})
np.testing.assert_array_equal(gradient_tf, gradient_inner_product)
if __name__ == '__main__':
unittest.main() |
import hashlib
import hmac
from urllib.parse import urlencode, quote_plus
import os
import requests
import json
from base.api import BaseApi
from config.settings import *
# Zaif exchange configuration.
CURRENCY_PAIR_BTC_JPY = 'btc_jpy'  # default trading pair
END_POINT = 'https://api.zaif.jp/api/1'  # public (unauthenticated) API
END_POINT_LATEST_TRADE = 'https://api.zaif.jp/tapi'  # authenticated trade API
NONCE_FILE = 'zaif_nonce.txt'  # persists the strictly increasing request nonce
class ZaifApi(BaseApi):
    """Thin client for the Zaif exchange public and authenticated trade APIs."""

    def get_currency_pair(self, currency_pair=None):
        """Return the given pair, falling back to the default btc_jpy."""
        if currency_pair is None:
            return CURRENCY_PAIR_BTC_JPY
        return currency_pair

    def load_nonce(self):
        """Read the last used nonce from disk; 0 when no nonce file exists."""
        if not os.path.exists(NONCE_FILE):
            return 0
        # 'with' guarantees the handle is closed even if int() raises.
        with open(NONCE_FILE, 'r') as f:
            return int(f.readline())

    def save_nonce(self, nonce):
        """Persist the nonce so the next request uses a larger value."""
        with open(NONCE_FILE, 'w') as f:
            f.write(str(nonce))

    def request_balance(self):
        """Return the account's fund balances from the trade API."""
        result = self.request_latest_trade_api({'method': 'get_info', })
        result = result['funds']
        return result

    def request_currency_pairs(self, currency_pair='all'):
        """Fetch metadata for one currency pair (or 'all') from the public API.

        Raises:
            Exception: when the HTTP status is not 200.
        """
        response = requests.get('{}/currency_pairs/{}'.format(END_POINT, quote_plus(currency_pair)))
        if response.status_code != 200:
            raise Exception('return status code is {}'.format(response.status_code))
        result = json.loads(response.text)
        return result

    def request_last_price(self, currency_pair=None):
        """Return the last traded price for the pair (default btc_jpy).

        Raises:
            Exception: when the HTTP status is not 200.
        """
        currency_pair = self.get_currency_pair(currency_pair)
        response = requests.get('{}/last_price/{}'.format(END_POINT, quote_plus(currency_pair)))
        if response.status_code != 200:
            raise Exception('return status code is {}'.format(response.status_code))
        result = json.loads(response.text)
        return result['last_price']

    def request_latest_trade_api(self, parameters=None):
        """POST a signed request to the trade API and return its 'return' payload.

        The request is signed with HMAC-SHA512 over the urlencoded body.

        Raises:
            Exception: on a non-200 status or an unsuccessful result code.
        """
        # BUG FIX: the default of None used to crash on parameters['nonce'].
        if parameters is None:
            parameters = {}
        nonce = self.load_nonce()
        nonce += 1
        self.save_nonce(nonce)
        parameters['nonce'] = nonce
        encoded = urlencode(parameters)
        signature = hmac.new(bytearray(ZAIF_API_SECRET.encode('utf-8')), digestmod=hashlib.sha512)
        signature.update(encoded.encode('utf-8'))
        headers = {'key': ZAIF_API_KEY, 'sign': signature.hexdigest()}
        response = requests.post(END_POINT_LATEST_TRADE, data=encoded, headers=headers)
        if response.status_code != 200:
            raise Exception('return status code is {}'.format(response.status_code))
        result = json.loads(response.text)
        if int(result['success']) != 1:
            raise Exception('return success code is {}'.format(result['success']))
        return result['return']

    def request_trade(self, amount, is_ask, price=None, currency_pair=None, **options):
        """Place an order ('ask' when is_ask else 'bid') via the trade API.

        Optional 'limit' and 'comment' keyword options are forwarded verbatim.
        """
        currency_pair = self.get_currency_pair(currency_pair)
        parameters = {
            'currency_pair': currency_pair,
            'action': ('ask' if is_ask else 'bid'),
            'price': price,
            'amount': amount,
        }
        if 'limit' in options:
            parameters['limit'] = options['limit']
        if 'comment' in options:
            parameters['comment'] = options['comment']
        result = self.request_latest_trade_api(parameters)
        return result
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ipaddress
from lte.protos.mobilityd_pb2 import IPAddress
from lte.protos.policydb_pb2 import FlowMatch
from magma.pipelined.openflow.magma_match import MagmaMatch
from magma.pipelined.openflow.registers import (
DPI_REG,
Direction,
load_direction,
)
from ryu.lib.packet import ether_types
# OpenFlow match fields that are copied verbatim from an OVS flow dump
# into a MagmaMatch (see ovs_flow_match_to_magma_match below).
MATCH_ATTRIBUTES = ['metadata', 'reg0', 'reg1', 'reg2', 'reg3', 'reg4', 'reg5',
                    'reg6', 'reg8', 'reg9', 'reg10',
                    'in_port', 'dl_vlan', 'vlan_tci',
                    'eth_type', 'dl_dst', 'dl_src',
                    'arp_tpa', 'arp_spa', 'arp_op',
                    'ipv4_dst', 'ipv4_src', 'ipv6_src', 'ipv6_dst',
                    'ip_proto', 'tcp_src', 'tcp_dst', 'udp_src', 'udp_dst']
class FlowMatchError(Exception):
    """Raised when a FlowMatch is malformed or cannot be converted."""
def _check_pkt_protocol(match):
'''
Verify that the match flags are set properly
Args:
match: FlowMatch
'''
if (match.tcp_dst or match.tcp_src) and (match.ip_proto !=
match.IPPROTO_TCP):
raise FlowMatchError("To use tcp rules set ip_proto to IPPROTO_TCP")
if (match.udp_dst or match.udp_src) and (match.ip_proto !=
match.IPPROTO_UDP):
raise FlowMatchError("To use udp rules set ip_proto to IPPROTO_UDP")
return True
def flow_match_to_magma_match(match, ip_addr=None):
    '''
    Convert a FlowMatch to a MagmaMatch object.

    Args:
        match: FlowMatch
        ip_addr: optional UE IPAddress; when set, additionally matches the UE
            address as src (Direction.OUT) or dst (otherwise).
    '''
    _check_pkt_protocol(match)
    # Default to the IPv4 ethertype; switched to IPv6 below when needed.
    match_kwargs = {'eth_type': ether_types.ETH_TYPE_IP}
    attributes = ['ip_dst', 'ip_src',
                  'ip_proto', 'tcp_src', 'tcp_dst',
                  'udp_src', 'udp_dst', 'app_name']
    for attrib in attributes:
        value = getattr(match, attrib, None)
        # Falsy (unset/zero) fields are simply not matched on.
        if not value:
            continue
        if attrib in {'ip_dst', 'ip_src'}:
            if not value.address:
                continue
            decoded_ip = _get_ip_tuple(value.address.decode('utf-8'))
            # NOTE(review): this check looks dead — 'value' is truthy here;
            # it was presumably meant to test 'decoded_ip'. Confirm intent.
            if value is None:
                return
            if value.version == IPAddress.IPV4:
                if attrib == 'ip_src':
                    match_kwargs['ipv4_src'] = decoded_ip
                elif attrib == 'ip_dst':
                    match_kwargs['ipv4_dst'] = decoded_ip
            else:
                match_kwargs['eth_type'] = ether_types.ETH_TYPE_IPV6
                if attrib == 'ip_src':
                    match_kwargs['ipv6_src'] = decoded_ip
                elif attrib == 'ip_dst':
                    match_kwargs['ipv6_dst'] = decoded_ip
            continue
        elif attrib == 'app_name':
            # app_name is carried in the DPI metadata register.
            attrib = DPI_REG
        match_kwargs[attrib] = value
    # Specific UE IP match
    if ip_addr:
        if ip_addr.version == IPAddress.IPV4:
            ip_src_reg = 'ipv4_src'
            ip_dst_reg = 'ipv4_dst'
        else:
            match_kwargs['eth_type'] = ether_types.ETH_TYPE_IPV6
            ip_src_reg = 'ipv6_src'
            ip_dst_reg = 'ipv6_dst'
        if ip_addr.address.decode('utf-8'):
            if get_direction_for_match(match) == Direction.OUT:
                match_kwargs[ip_src_reg] = ip_addr.address.decode('utf-8')
            else:
                match_kwargs[ip_dst_reg] = ip_addr.address.decode('utf-8')
    return MagmaMatch(direction=get_direction_for_match(match),
                      **match_kwargs)
def flow_match_to_actions(datapath, match):
    '''
    Convert a FlowMatch to a list of set-field actions that would make a
    packet match it (used to synthesize matching packets).

    Args:
        datapath: ryu datapath; its ofproto_parser builds the actions.
        match: FlowMatch
    '''
    parser = datapath.ofproto_parser
    _check_pkt_protocol(match)
    # Eth type and ip proto are read only, can't set them here (set on pkt init)
    actions = [
        # Fall back to placeholder addresses when the match leaves them unset.
        parser.OFPActionSetField(ipv4_src=getattr(match, 'ipv4_src', '1.1.1.1')),
        parser.OFPActionSetField(ipv4_dst=getattr(match, 'ipv4_dst', '1.2.3.4')),
        load_direction(parser, get_direction_for_match(match)),
        # Mirror the app id into the DPI register.
        parser.NXActionRegLoad2(dst=DPI_REG, value=getattr(match, 'app_id', 0)),
    ]
    if match.ip_proto == FlowMatch.IPPROTO_TCP:
        actions.extend([
            parser.OFPActionSetField(tcp_src=getattr(match, 'tcp_src', 0)),
            parser.OFPActionSetField(tcp_dst=getattr(match, 'tcp_dst', 0))
        ])
    elif match.ip_proto == FlowMatch.IPPROTO_UDP:
        actions.extend([
            parser.OFPActionSetField(udp_src=getattr(match, 'udp_src', 0)),
            parser.OFPActionSetField(udp_dst=getattr(match, 'udp_dst', 0))
        ])
    return actions
def flip_flow_match(match):
    '''
    Return a new FlowMatch with src/dst IPs and ports swapped and the
    direction inverted.

    Args:
        match: FlowMatch
    '''
    flipped_direction = (
        match.UPLINK
        if getattr(match, 'direction', None) == match.DOWNLINK
        else match.DOWNLINK
    )
    swapped = {
        'ip_src': getattr(match, 'ip_dst', None),
        'ip_dst': getattr(match, 'ip_src', None),
        'tcp_src': getattr(match, 'tcp_dst', None),
        'tcp_dst': getattr(match, 'tcp_src', None),
        'udp_src': getattr(match, 'udp_dst', None),
        'udp_dst': getattr(match, 'udp_src', None),
    }
    return FlowMatch(
        ip_proto=getattr(match, 'ip_proto', None),
        direction=flipped_direction,
        app_name=getattr(match, 'app_name', None),
        **swapped
    )
def get_flow_ip_dst(match):
    """Return the decoded IPv4 destination of the match, or None."""
    ip_dst = getattr(match, 'ip_dst', None)
    if ip_dst is None:
        return None
    decoded_ip = ip_dst.address.decode('utf-8')
    # Only IPv4 destinations are reported; anything else yields None.
    return decoded_ip if ip_dst.version == IPAddress.IPV4 else None
def ipv4_address_to_str(ipaddr: IPAddress):
    """Return the decoded address string for an IPv4 IPAddress, else None."""
    decoded_ip = ipaddr.address.decode('utf-8')
    return decoded_ip if ipaddr.version == IPAddress.IPV4 else None
def get_ue_ip_match_args(ip_addr: IPAddress, direction: Direction):
    """Build the {ipv4/ipv6 src-or-dst field: address} kwargs for a UE match.

    Returns an empty dict when ip_addr is unset or its address is empty.
    """
    if not ip_addr:
        return {}
    if ip_addr.version == ip_addr.IPV4:
        src_field, dst_field = 'ipv4_src', 'ipv4_dst'
    else:
        src_field, dst_field = 'ipv6_src', 'ipv6_dst'
    addr_str = ip_addr.address.decode('utf-8')
    if not addr_str:
        return {}
    # Uplink traffic matches on the UE as source; downlink as destination.
    field = src_field if direction == Direction.OUT else dst_field
    return {field: addr_str}
def get_eth_type(ip_addr: IPAddress):
    """Return the ethertype for the address family (IPv4 when unset)."""
    if ip_addr and ip_addr.version != IPAddress.IPV4:
        return ether_types.ETH_TYPE_IPV6
    return ether_types.ETH_TYPE_IP
def _get_ip_tuple(ip_str):
'''
Convert an ip string to a formatted block tuple
Args:
ip_str (string): ip string to parse
'''
try:
ip_block = ipaddress.ip_network(ip_str)
except ValueError as err:
raise FlowMatchError("Invalid Ip block: %s" % err)
block_tuple = '{}'.format(ip_block.network_address), \
'{}'.format(ip_block.netmask)
return block_tuple
def get_direction_for_match(flow_match):
    """Map a FlowMatch direction onto the pipeline Direction register value."""
    is_uplink = flow_match.direction == flow_match.UPLINK
    return Direction.OUT if is_uplink else Direction.IN
def convert_ipv4_str_to_ip_proto(ipv4_str):
    """Wrap a dotted-quad string in an IPv4 IPAddress proto."""
    encoded = ipv4_str.encode('utf-8')
    return IPAddress(version=IPAddress.IPV4, address=encoded)
def convert_ipv6_str_to_ip_proto(ipv6_str):
    """Wrap an IPv6 literal string in an IPv6 IPAddress proto."""
    encoded = ipv6_str.encode('utf-8')
    return IPAddress(version=IPAddress.IPV6, address=encoded)
def convert_ipv6_bytes_to_ip_proto(ipv6_bytes):
    """Wrap already-encoded IPv6 address bytes in an IPv6 IPAddress proto."""
    return IPAddress(
        version=IPAddress.IPV6,
        address=ipv6_bytes,
    )
def convert_ip_str_to_ip_proto(ip_str: str):
    """Build an IPAddress proto, inferring the family from the string."""
    # Two or more colons can only appear in an IPv6 literal.
    if ip_str.count(":") >= 2:
        return convert_ipv6_bytes_to_ip_proto(ip_str.encode('utf-8'))
    return convert_ipv4_str_to_ip_proto(ip_str)
def ovs_flow_match_to_magma_match(flow):
    """Copy the recognized (truthy) OVS match fields into a MagmaMatch."""
    attribute_dict = {
        attr: flow.match.get(attr, None)
        for attr in MATCH_ATTRIBUTES
        if flow.match.get(attr, None)
    }
    return MagmaMatch(**attribute_dict)
|
# python_lists.py
# Julie M. Anderson
# samples of lists in python
# Lists are heterogenous in python
# Modernized from Python 2 "print" statements to Python 3 print() calls;
# print(a, b) separates its arguments with a space exactly like "print a, b".
aList = [1, 2.0, "aString", False]
print("A heterogenous list: ", aList)
# Operations on lists
sampleList = [1, 2, 3, 4]
print("My sample list: ", sampleList)
# indexing using []
print("sampleList[1]: ", sampleList[1])
# concatenation using +
anotherList = ["cat", "dog"]
print("Another sample list: ", anotherList)
print("sampleList + anotherList: ", sampleList + anotherList)
# repetition using *
print("sampleList * 3: ", sampleList * 3)
# membership using in -- checks to see if item exists in a sequence
print("4 in sampleList: ", 4 in sampleList)
print("8 in sampleList: ", 8 in sampleList)
# number of items in a sequence using len
print("len(anotherList): ", len(anotherList))
# slicing using [:] starts at index of first number, up to but not including second
print("sampleList: ", sampleList)
print("sampleList[1:2]: ", sampleList[1:2])
# A list is a collection of references to Python data objects!
print("Be careful with lists! they are references!")
print("newList = [sampleList] * 3")
newList = [sampleList] * 3
print("newList:", newList)
print("sampleList[2] = 256")
sampleList[2] = 256
print("newList: ", newList)
# Built in list methods
aList = [1, 'cat', 5.0]
print("aList: ", aList)
# append aList.append(item) Add item to end of aList
aList.append(2)
print("aList.append(2): ", aList)
# insert aList.insert(index, item) inserts item at index
aList.insert(1, "x")
print("aList.insert(1, 'x'): ", aList)
# pop aList.pop() removes and returns the last item
print(".pop() removes and returns the last item.")
print("aList.pop(): ", aList.pop())
print("aList: ", aList)
# you can also pop at an index aList.pop(index)
print("aList.pop(2): ", aList.pop(2))
print("aList: ", aList)
# sort aList.sort(), aList.reverse()
listToSort = [5, 12, 7, 11, 0, 3]
print("listToSort: ", listToSort)
listToSort.sort()
print("listToSort.sort(): ", listToSort)
listToSort.reverse()
print("listToSort.reverse(): ", listToSort)
# del del aList[index] deletes item at index
print(aList)
del aList[2]
print("del aList[2]")
print("aList: ", aList)
# index aList.index(item) returns the index of the 1st occurence of the item
print("aList.index('x'): ", aList.index('x'))
# count aList.count(item) returns the number of occurrences of the item
numberList = [1] * 3
print("numberList: ", numberList)
print("numberList.count(1): ", numberList.count(1))
# remove aList.remove(item) removes first occurrence of item
print("numberList.remove(1)")
numberList.remove(1)
print("numberList: ", numberList)
|
from django.contrib import admin
from friendship.models import Friendship
# Register your models here.
class FriendshipAdmin(admin.ModelAdmin):
    # Columns shown on the Friendship changelist page in the Django admin.
    list_display = ['req_from', 'req_to', 'is_accepted', 'created_at', 'updated_at']
admin.site.register(Friendship, FriendshipAdmin) |
import concurrent.futures
import functools
import threading
class Controller:
    """Tiny run-state switch: start()/stop() toggle the public `ok` flag."""

    def __init__(self):
        # Controllers begin in the stopped state.
        self._set_state(False)

    def start(self):
        """Switch the controller on."""
        self._set_state(True)

    def stop(self):
        """Switch the controller off."""
        self._set_state(False)

    def _set_state(self, running):
        # Single writer for the public `ok` attribute.
        self.ok = running
|
# This Python file uses the following encoding: utf-8
""" Subject line.
Main text.
"""
from unittest import TestCase
from classes.Index import index_for
from classes.Variable import Variable
__author__ = 'Chao Li'
class TestIndexFor(TestCase):
    """Exercises index_for() on the factor-product example of Koller & Friedman."""

    def setUp(self):
        # Variable(id, name, domain values): |A| = 3, |B| = 2, |C| = 2.
        self.A = Variable(1, "A", ['a1', 'a2', 'a3'])
        self.B = Variable(2, "B", ['b1', 'b2'])
        # NOTE(review): 'b2' below looks like a typo for 'c2' — confirm.
        self.C = Variable(3, "C", ['c1', 'b2'])
        # Variable subsets of the joint scope Z = {C, B, A}.
        self.X = [ self.B, self.A]
        self.Y = [ self.C, self.B]
        self.Z = [ self.C, self.B, self.A]

    def test_IndexFor(self):
        """
        References
        ----------
        D. Koller and N. Friedman (2009). Probabilistic Graphical Models: Principles and Techniques. edited by . MIT Press.
        page 107, Figure 4.3 An example of factor product
        """
        # Spot-check the mapping from Z's joint assignment index to X's index.
        index_X4Z = index_for(self.X, self.Z)
        assert 0 == index_X4Z[0]
        assert 0 == index_X4Z[1]
        assert 1 == index_X4Z[2]
        assert 1 == index_X4Z[3]
        assert 5 == index_X4Z[10]
        assert 5 == index_X4Z[11]
        # And the mapping from Z's joint assignment index to Y's index.
        index_Y4Z = index_for(self.Y, self.Z)
        assert 0 == index_Y4Z[8]
        assert 1 == index_Y4Z[9]
        assert 2 == index_Y4Z[10]
        assert 3 == index_Y4Z[11]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Deletes the original spirit models (comments, topics, categories, ...).

    Foreign keys and unique_together constraints are removed before each
    DeleteModel so the drops can proceed without dependency errors.
    """

    dependencies = [
        ('spirit', '0002_auto_20150601_1042'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='category',
            name='parent',
        ),
        migrations.RemoveField(
            model_name='comment',
            name='topic',
        ),
        migrations.RemoveField(
            model_name='comment',
            name='user',
        ),
        migrations.AlterUniqueTogether(
            name='commentbookmark',
            unique_together=None,
        ),
        migrations.RemoveField(
            model_name='commentbookmark',
            name='topic',
        ),
        migrations.RemoveField(
            model_name='commentbookmark',
            name='user',
        ),
        migrations.DeleteModel(
            name='CommentBookmark',
        ),
        migrations.RemoveField(
            model_name='commentflag',
            name='comment',
        ),
        migrations.RemoveField(
            model_name='commentflag',
            name='moderator',
        ),
        migrations.DeleteModel(
            name='CommentFlag',
        ),
        migrations.RemoveField(
            model_name='commenthistory',
            name='comment_fk',
        ),
        migrations.DeleteModel(
            name='CommentHistory',
        ),
        migrations.AlterUniqueTogether(
            name='commentlike',
            unique_together=None,
        ),
        migrations.RemoveField(
            model_name='commentlike',
            name='comment',
        ),
        migrations.RemoveField(
            model_name='commentlike',
            name='user',
        ),
        migrations.DeleteModel(
            name='CommentLike',
        ),
        migrations.AlterUniqueTogether(
            name='flag',
            unique_together=None,
        ),
        migrations.RemoveField(
            model_name='flag',
            name='comment',
        ),
        migrations.RemoveField(
            model_name='flag',
            name='user',
        ),
        migrations.DeleteModel(
            name='Flag',
        ),
        migrations.RemoveField(
            model_name='topic',
            name='category',
        ),
        migrations.DeleteModel(
            name='Category',
        ),
        migrations.RemoveField(
            model_name='topic',
            name='user',
        ),
        migrations.AlterUniqueTogether(
            name='topicfavorite',
            unique_together=None,
        ),
        migrations.RemoveField(
            model_name='topicfavorite',
            name='topic',
        ),
        migrations.RemoveField(
            model_name='topicfavorite',
            name='user',
        ),
        migrations.DeleteModel(
            name='TopicFavorite',
        ),
        migrations.AlterUniqueTogether(
            name='topicnotification',
            unique_together=None,
        ),
        migrations.RemoveField(
            model_name='topicnotification',
            name='comment',
        ),
        migrations.DeleteModel(
            name='Comment',
        ),
        migrations.RemoveField(
            model_name='topicnotification',
            name='topic',
        ),
        migrations.RemoveField(
            model_name='topicnotification',
            name='user',
        ),
        migrations.DeleteModel(
            name='TopicNotification',
        ),
        migrations.AlterUniqueTogether(
            name='topicprivate',
            unique_together=None,
        ),
        migrations.RemoveField(
            model_name='topicprivate',
            name='topic',
        ),
        migrations.RemoveField(
            model_name='topicprivate',
            name='user',
        ),
        migrations.DeleteModel(
            name='TopicPrivate',
        ),
        migrations.AlterUniqueTogether(
            name='topicunread',
            unique_together=None,
        ),
        migrations.RemoveField(
            model_name='topicunread',
            name='topic',
        ),
        migrations.DeleteModel(
            name='Topic',
        ),
        migrations.RemoveField(
            model_name='topicunread',
            name='user',
        ),
        migrations.DeleteModel(
            name='TopicUnread',
        ),
    ]
|
$NetBSD: patch-breezy_tests_test__server.py,v 1.1 2019/10/27 13:05:46 rhialto Exp $
Stub out call to shutdown() since it mysteriously fails with
an OSError exception with EINVAL.
See https://bugs.launchpad.net/brz/+bug/1849971
--- breezy/tests/test_server.py.orig 2019-06-16 22:18:58.000000000 +0000
+++ breezy/tests/test_server.py
@@ -381,7 +381,7 @@ class TestingTCPServerMixin(object):
socket.
"""
try:
- sock.shutdown(socket.SHUT_RDWR)
+ # sock.shutdown(socket.SHUT_RDWR) # fails with EINVAL
sock.close()
except Exception as e:
if self.ignored_exceptions(e):
|
import logging
import argparse
import random
import requests
from resources.banner import banner
from resources.headers import user_agents
from assets.colors import green,red,white,reset
# Timestamped, color-coded log lines for the retry/error output below.
logging.basicConfig(format=f"%(asctime)s {white}%(message)s{reset}"
                    ,datefmt=f"{white}%I{green}:{white}%M{green}:{white}%S%p",level=logging.DEBUG)
# Show the ASCII-art banner once at startup.
print(banner)
class Search:
    """Looks up a GitHub user via the public REST API and pretty-prints it."""

    def __init__(self):
        self.base = "https://api.github.com/users/"
        # A random user agent per session reduces trivial blocking.
        self.headers = {"User-Agent": f"{random.choice(user_agents)}"}

    def main(self, username):
        """Fetch and print the profile of `username`, retrying on errors.

        Stops (instead of retrying forever) when the API answers with an
        error payload such as {"message": "Not Found"}.
        """
        import time  # local import keeps the module import block unchanged
        base = self.base + username
        while True:
            try:
                data = requests.get(base, headers=self.headers).json()
                # BUG FIX: an unknown user returns {"message": "Not Found"},
                # which used to KeyError below and retry forever.
                if 'message' in data:
                    logging.info(f"Error: {red}{data['message']}{reset}")
                    break
                print(f"""{white}
{data['name']}
├ Profile photo: {green}{data['avatar_url']}{white}
├─ Account type: {green}{data['type']}{white}
├── Username: {green}{data['login']}{white}
├─ User ID: {green}{data['id']}{white}
├─── Node ID: {green}{data['node_id']}{white}
├── Location: {green}{data['location']}{white}
├─ Followers: {green}{data['followers']}{white}
├──── Following: {green}{data['following']}{white}
├── Blog: {green}{data['blog']}{white}
├─ Bio: {green}{data['bio']}{white}
├──── Public gists: {green}{data['public_gists']}{white}
├─── Public repositories: {green}{data['public_repos']}{white}
├── Is site admin: {red}{data['site_admin']}{white}
├───── Is hireable: {green}{data['hireable']}{white}
├─ Organization: {green}{data['company']}{white}
├─── Twitter handle: {green}@{data['twitter_username']}{white}
├── Joined on: {green}{data['created_at']}{white}
└╼ Last updated on: {green}{data['updated_at']}{reset}
""")
                break
            except KeyboardInterrupt:
                exit()
            except Exception as e:
                logging.info(f"Error: {red}{e}{reset}")
                logging.info(f"Retrying...{reset}")
                # BUG FIX: back off briefly instead of hammering the API.
                time.sleep(1)
|
from __future__ import print_function
import json
import sys
import traceback
import os
from ircutils import bot
import commands
import custom_commands
class berry(bot.SimpleBot):
    """IRC bot: dispatches ~commands and regex triggers, and hot-reloads its
    config and command modules when their files change on disk."""

    def __init__(self, config):
        nick = config['nick'].encode('ascii', 'replace')
        bot.SimpleBot.__init__(self, nick)
        self.config = config
        # Words banned via the server spamfilter (+g); maintained in on_any().
        self.banned_words = set()
        # Counts outstanding "MODE <chan> g" queries whose replies we parse.
        self.checking_for_banned_words = 0

    def send_message(self, to, message):
        """Send a PRIVMSG, tolerating both byte and unicode input (Python 2)."""
        try:
            super(berry, self).send_message(to, message.encode('utf-8', 'replace'))
        except UnicodeDecodeError:
            super(berry, self).send_message(to, message.decode('utf-8').encode('utf-8', 'replace'))

    def send_action(self, to, message):
        """Send a CTCP ACTION, tolerating both byte and unicode input."""
        try:
            super(berry, self).send_action(to, message.encode('utf-8', 'replace'))
        except UnicodeDecodeError:
            super(berry, self).send_action(to, message.decode('utf-8').encode('utf-8', 'replace'))

    def command_help(self, event):
        '''Usage: ~help <command> The fuck do you think it does?'''
        # Get commands with documentation; NSFW ones are hidden in SFW channels.
        documented_commands = {
            x[8:]: self.cmds[x].__doc__
            for x in self.cmds
            if self.cmds[x].__doc__ is not None and
            ((event.respond not in self.config['sfwchans'].split(',')) or
             (not hasattr(self.cmds[x], 'nsfw')))
        }
        # If no params, send list of commands
        if len(event.params) < 1:
            self.send_message(event.respond, "Currently supported commands: %s"
                              % ', '.join(documented_commands.keys()))
        # If the param is documented, send the doc string for it
        elif event.params in documented_commands:
            self.send_message(event.respond, documented_commands[event.params])
        # If the param is undocumented, send unsupported
        else:
            self.send_message(event.respond, "Unsupported command")

    def reload_commands(self):
        """Re-read config.json, re-import command modules, rebuild dispatch tables."""
        self.config = loadconf('config.json')
        reload(commands)
        reload(custom_commands)
        # Remember mtimes so privmsg() can detect later changes.
        self.lastloadconf = os.stat('config.json').st_mtime
        self.lastloadcommands = os.stat('commands.py').st_mtime
        self.lastloadcustomcommands = os.stat('custom_commands.py').st_mtime
        # Create objects for commands and custom_commands
        cmd = commands.commands(self.send_message, self.send_action,
                                self.banned_words, self.config)
        cust_cmd = custom_commands.custom_commands(
            self.send_message, self.send_action, self.config)

        # Collect all callable attributes with a given prefix from an object.
        def get_methods(obj, prefix):
            return {
                x: getattr(obj, x)
                for x in dir(obj)
                if x.startswith(prefix) and callable(getattr(obj, x))
            }

        # custom_commands entries override commands and self on name clashes.
        self.regexes = get_methods(cmd, 'regex_')
        self.regexes.update(get_methods(self, 'regex_'))
        self.regexes.update(get_methods(cust_cmd, 'regex_'))
        self.cmds = get_methods(cmd, 'command_')
        self.cmds.update(get_methods(self, 'command_'))
        self.cmds.update(get_methods(cust_cmd, 'command_'))

    def privmsg(self, event):
        """Handle one PRIVMSG: hot-reload if sources changed, run regex hooks,
        then dispatch a prefixed command if one matches."""
        # Reload config and commands when any source file changed on disk.
        if os.stat('config.json').st_mtime > self.lastloadconf or os.stat(
                'commands.py').st_mtime > self.lastloadcommands or os.stat(
                    'custom_commands.py'
                ).st_mtime > self.lastloadcustomcommands:
            self.reload_commands()
        event.command = event.message.split(' ')[0]
        try:
            event.params = event.message.split(' ', 1)[1]
        except:
            event.params = ''
        # Execute regexes
        for regex in self.regexes:
            self.regexes[regex](event)
        # Execute command (skip NSFW commands in SFW channels)
        if event.command[0] in self.config['prefixes'].split(
        ) and 'command_%s' % event.command[1:].lower() in self.cmds:
            comm = self.cmds['command_%s' % event.command[1:].lower()]
            if not (event.respond in self.config['sfwchans'].split(',') and
                    hasattr(comm, 'nsfw')):
                comm(event)

    def on_any(self, event):
        """Catch-all IRC hook: invites, spamfilter bookkeeping, PRIVMSG routing."""
        try:
            event.paramstr = ' '.join(event.params)
            event.respond = event.target if event.target != self.nickname else event.source
            if not event.source == self.nickname:
                if event.command == 'INVITE':
                    self.join_channel(event.params[0])
                # after joining a channel, send mode g command to check for banned words
                if event.command == "RPL_ENDOFNAMES":
                    channel = event.params[0]
                    self.checking_for_banned_words += 1
                    self.execute("MODE", channel, "g")
                # take banned words from server messages
                if is_int(event.command) and self.checking_for_banned_words > 0:
                    # BUG FIX: use self.config here; the module-global
                    # 'config' only exists when run as a script.
                    if len(
                            event.params
                    ) == 4 and event.params[0] in self.config['channels'].split(
                            ',') and is_int(event.params[3]):
                        self.banned_words.add(event.params[1])
                    if len(
                            event.params
                    ) == 2 and event.params[1] == 'End of channel spamfilter list':
                        self.checking_for_banned_words -= 1
                # update banned word list when someone uses mode +/-g
                if event.command == 'MODE' and len(event.params) >= 2:
                    if event.params[0] == '+g':
                        self.banned_words.add(event.params[1])
                    if event.params[0] == '-g' and event.params[1] in self.banned_words:
                        self.banned_words.remove(event.params[1])
                if event.command in ['PRIVMSG']:
                    self.privmsg(event)
        except:
            print("ERROR", str(sys.exc_info()))
            print(traceback.print_tb(sys.exc_info()[2]))
def loadconf(filename):
    """Load the JSON config at `filename`, writing defaults when it is absent.

    Args:
        filename: path to the JSON config file.

    Returns:
        dict: the parsed (or freshly written default) configuration.
    """
    if os.path.isfile(filename):
        with open(filename, 'r') as conffile:
            return json.load(conffile)
    defaultConf = dict(
        debug=False,
        nick='Berry',
        server='127.0.0.1',
        channels='#bottest',
        imgurKey='',
        wolframKey='',
        prefixes='~ . !',
        traktKey='',
        googleKey='',
        googleengine='015980026967623760357:olr5wqcaob8',
        sfwchans='#channel1,#channel2',
        # BUG FIX: startup reads config['password']; a freshly generated
        # default config used to KeyError on it.
        password='',
        yiffs=['2furry4me'])
    with open(filename, 'w') as conffile:
        json.dump(
            defaultConf,
            conffile,
            sort_keys=True,
            indent=4,
            separators=(',', ': '))
    return defaultConf
def is_int(s):
    """Return True when `s` parses as a base-10 integer."""
    try:
        int(s)
        return True
    except ValueError:
        return False
if __name__ == "__main__":
    # Bootstrap: load (or create) the config, connect, and run forever.
    config = loadconf("config.json")
    s = berry(config)
    s.connect(
        config['server'].encode('ascii', 'replace'),
        channel=config['channels'].encode('ascii', 'replace'),
        use_ssl=False, password=config['password'].encode('ascii', 'replace'))
    # Zeroed mtime markers force a command/config load on the first PRIVMSG.
    s.lastloadconf = 0
    s.lastloadcommands = 0
    s.lastloadcustomcommands = 0
    print('starting')
    s.start()
|
from pineapple_core.core.node import node
@node(module="String", name="ToInt", autotrigger=True)
def string_to_int_node(a: str) -> int:
    """Pineapple node: parse the string `a` as a base-10 integer."""
    return int(a)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-12-21 04:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relaxes Article.abstract to an optional (blank/null) text field."""

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='article',
            name='abstract',
            # help_text/verbose_name are escaped Chinese; roughly: "optional —
            # when empty, 54 characters of the body are excerpted" / "abstract".
            field=models.TextField(blank=True, help_text='\u53ef\u9009\u9879\uff0c\u82e5\u4e3a\u7a7a\u5219\u6458\u53d6\u6b63\u6587\u94b154\u4e2a\u5b57\u7b26', null=True, verbose_name='\u6458\u8981'),
        ),
    ]
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)
import ess.wfm as wfm
import scipp as sc
import scippneutron as scn
import pytest
def test_basic_stitching():
    """A single 0-10 us frame with a -5 us correction stitches into 'tof'
    bins shifted by the correction, conserving the total counts."""
    frames = sc.Dataset()
    shift = -5.0
    frames['time_min'] = sc.array(dims=['frame'], values=[0.0], unit=sc.units.us)
    frames['time_max'] = sc.array(dims=['frame'], values=[10.0], unit=sc.units.us)
    frames['time_correction'] = sc.array(dims=['frame'],
                                         values=[shift],
                                         unit=sc.units.us)
    frames["wfm_chopper_mid_point"] = sc.vector(value=[0., 0., 2.0], unit='m')
    # 100 unit counts over 100 time bins in [0, 10] us.
    data = sc.DataArray(data=sc.ones(dims=['t'], shape=[100], unit=sc.units.counts),
                        coords={
                            't':
                            sc.linspace(dim='t',
                                        start=0.0,
                                        stop=10.0,
                                        num=101,
                                        unit=sc.units.us),
                            'source_position':
                            sc.vector(value=[0., 0., 0.], unit='m')
                        })
    nbins = 10
    stitched = wfm.stitch(data=data, dim='t', frames=frames, bins=nbins)
    # Note dimension change to TOF as well as shift
    assert sc.identical(
        sc.values(stitched),
        sc.DataArray(data=sc.ones(dims=['tof'], shape=[nbins], unit=sc.units.counts) *
                     nbins,
                     coords={
                         'tof':
                         sc.linspace(dim='tof',
                                     start=0.0 - shift,
                                     stop=10.0 - shift,
                                     num=nbins + 1,
                                     unit=sc.units.us),
                         'source_position':
                         sc.vector(value=[0., 0., 2.], unit='m')
                     }))
def _do_stitching_on_beamline(wavelengths, dim, event_mode=False):
    """Simulate neutrons of `wavelengths` on a fake 6-frame WFM beamline,
    stitch the frames, and convert the result to wavelength.

    Returns:
        (out, dlambda_over_lambda): wavelength-space data and the relative
        wavelength resolution (Delta_lambda / lambda) of the chopper setup.
    """
    # Make beamline parameters for 6 frames
    coords = wfm.make_fake_beamline(nframes=6)
    # They are all created half-way through the pulse.
    # Compute their arrival time at the detector.
    alpha = 2.5278e-4 * (sc.Unit('s') / sc.Unit('angstrom') / sc.Unit('m'))
    dz = sc.norm(coords['position'] - coords['source_position'])
    arrival_times = sc.to_unit(
        alpha * dz * wavelengths,
        'us') + coords['source_pulse_t_0'] + (0.5 * coords['source_pulse_length'])
    coords[dim] = arrival_times
    # Make a data array that contains the beamline and the time coordinate
    tmin = sc.min(arrival_times)
    tmax = sc.max(arrival_times)
    dt = 0.1 * (tmax - tmin)
    # Event mode needs only a coarse binning; histogram mode bins finely.
    if event_mode:
        num = 2
    else:
        num = 2001
    time_binning = sc.linspace(dim=dim,
                               start=(tmin - dt).value,
                               stop=(tmax + dt).value,
                               num=num,
                               unit=dt.unit)
    events = sc.DataArray(data=sc.ones(dims=['event'],
                                       shape=arrival_times.shape,
                                       unit=sc.units.counts,
                                       variances=True),
                          coords=coords)
    if event_mode:
        da = sc.bin(events, edges=[time_binning])
    else:
        da = sc.histogram(events, bins=time_binning)
    # Find location of frames
    frames = wfm.get_frames(da)
    stitched = wfm.stitch(frames=frames, data=da, dim=dim, bins=2001)
    wav = scn.convert(stitched, origin='tof', target='wavelength', scatter=False)
    if event_mode:
        out = wav
    else:
        out = sc.rebin(wav,
                       dim='wavelength',
                       bins=sc.linspace(dim='wavelength',
                                        start=1.0,
                                        stop=10.0,
                                        num=1001,
                                        unit='angstrom'))
    choppers = da.meta["choppers"].value
    # Distance between WFM choppers
    dz_wfm = sc.norm(choppers["WFMC2"].position - choppers["WFMC1"].position)
    # Delta_lambda / lambda
    dlambda_over_lambda = dz_wfm / sc.norm(coords['position'] -
                                           frames['wfm_chopper_mid_point'].data)
    return out, dlambda_over_lambda
def _check_lambda_inside_resolution(lam,
                                    dlam_over_lam,
                                    data,
                                    event_mode=False,
                                    check_value=True):
    """Assert that the counts within +/- dlam/2 of `lam` sum to 1 count
    (or that they do not, when check_value=False)."""
    dlam = 0.5 * dlam_over_lam * lam
    if event_mode:
        # Re-bin the events to a single bin spanning the resolution window.
        sum_in_range = sc.bin(data,
                              edges=[
                                  sc.array(dims=['wavelength'],
                                           values=[(lam - dlam).value,
                                                   (lam + dlam).value],
                                           unit=lam.unit)
                              ]).bins.sum().data['wavelength', 0]
    else:
        sum_in_range = sc.sum(data['wavelength', lam - dlam:lam + dlam]).data
    assert sc.isclose(sum_in_range, 1.0 * sc.units.counts).value is check_value
@pytest.mark.parametrize("dim", ['time', 'tof'])
@pytest.mark.parametrize("event_mode", [False, True])
def test_stitching_on_beamline(event_mode, dim):
    """Every simulated wavelength must land inside the WFM resolution window."""
    wavelengths = sc.array(dims=['event'],
                           values=[1.75, 3.2, 4.5, 6.0, 7.0, 8.25],
                           unit='angstrom')
    stitched, dlambda_over_lambda = _do_stitching_on_beamline(wavelengths,
                                                              dim=dim,
                                                              event_mode=event_mode)
    for i in range(len(wavelengths)):
        _check_lambda_inside_resolution(wavelengths['event', i],
                                        dlambda_over_lambda,
                                        stitched,
                                        event_mode=event_mode)
@pytest.mark.parametrize("dim", ['time', 'tof'])
@pytest.mark.parametrize("event_mode", [False, True])
def test_stitching_on_beamline_bad_wavelength(event_mode, dim):
    """A wavelength too short for the WFM choppers must fail the resolution
    check while all other wavelengths still pass."""
    # Create 6 neutrons. The first wavelength is in this case too short to pass through
    # the WFM choppers.
    wavelengths = sc.array(dims=['event'],
                           values=[1.5, 3.2, 4.5, 6.0, 7.0, 8.25],
                           unit='angstrom')
    stitched, dlambda_over_lambda = _do_stitching_on_beamline(wavelengths,
                                                              dim=dim,
                                                              event_mode=event_mode)
    # The first wavelength should fail the check, since anything not passing through
    # the choppers won't satisfy the dlambda/lambda condition.
    _check_lambda_inside_resolution(wavelengths['event', 0],
                                    dlambda_over_lambda,
                                    stitched,
                                    check_value=False,
                                    event_mode=event_mode)
    for i in range(1, len(wavelengths)):
        _check_lambda_inside_resolution(wavelengths['event', i],
                                        dlambda_over_lambda,
                                        stitched,
                                        event_mode=event_mode)
|
#!/usr/bin/env python
import sys
class Rule:
    """One production alternative: the list of right-hand-side symbols.

    An empty right-hand side is normalised to [''] so that the empty string
    explicitly stands for an epsilon production.
    """

    def __init__(self, exp):
        # Predict set; filled in by the table-construction pass later on.
        self.pred = None
        self.exp = exp
        if not self.exp:
            self.exp.append('')
class NTerm:
    """A nonterminal: its production rules plus cached FIRST/FOLLOW sets."""

    def __init__(self):
        self.rules = []     # list of Rule objects
        self.first = None   # FIRST set, computed lazily by First()
        self.follow = None  # FOLLOW set, filled by the FOLLOW passes
# name -> NTerm for every nonterminal read from stdin
nterm = {}
# nonterminal names in declaration order; nt_list[0] is the start symbol
nt_list = []
# recursion guard shared by First()/Follow(); cleared before each query
used = set()
def First(k):
    """Return the FIRST set of symbol k.

    Terminals yield {k}; for a nonterminal, union the FIRST sets over its
    rules, letting '' (epsilon) propagate past nullable prefixes.  Relies
    on the module-level `used` set as a recursion guard, so callers must
    clear `used` before each top-level query.
    """
    first = set()
    if k not in nterm:
        # Terminal (or the '' epsilon marker): FIRST is the symbol itself.
        first.add(k)
        return first
    if nterm[k].first != None:
        # Cached result from an earlier top-level computation.
        return nterm[k].first
    if k in used:
        # Already on the recursion stack: cut the cycle.
        return first
    used.add(k)
    for rule in nterm[k].rules:
        f = set()
        for t in rule.exp:
            if '' in f:
                # epsilon from the previous symbol is consumed by t.
                f.remove('')
            f |= First(t)
            if '' not in f:
                # t is not nullable: later symbols cannot contribute.
                break
        first |= f
    return first
def Follow(k,follow):
    """Propagate `follow` into the FOLLOW sets of nonterminals that occupy
    tail positions of k's rules.

    Each rule is walked right-to-left; only the (possibly empty) suffix of
    nonterminals at the end of a rule inherits the caller's FOLLOW set.
    Uses the module-level `used` set as a recursion guard.
    """
    if k in used:
        return
    used.add(k)
    for rule in nterm[k].rules:
        for token in reversed(rule.exp):
            if token not in nterm:
                # Terminal reached: symbols further left cannot see `follow`.
                break
            nterm[token].follow |= follow
            Follow(token,follow|nterm[k].follow)
def tcmp(a, b):
    """Three-way comparator for grammar symbols.

    Unquoted names sort before quoted terminals (those starting with '"');
    otherwise order is plain lexicographic.  Returns -1/0/1.

    Fixed: the Python-2-only builtin ``cmp`` is replaced by the standard
    portable equivalent ``(x > y) - (x < y)``, which returns identical
    values, so the comparator now also works on Python 3.
    """
    def _cmp(x, y):
        return (x > y) - (x < y)
    if len(a) * len(b) > 0 and ((a[0] == '"') ^ (b[0] == '"')):
        # Exactly one of the two symbols is quoted: order by that flag.
        return _cmp(a[0] == '"', b[0] == '"')
    return _cmp(a, b)
def toStr(s, f):
    """Render a symbol collection as a string with each item prefixed by a
    single space.  Surrounding quotes are stripped from terminals; the
    empty symbol is shown as '(null)' when f == 1 (epsilon position) and
    as '(eof)' otherwise (end-of-input marker)."""
    empty_marker = '(null)' if f == 1 else '(eof)'
    rendered = []
    for item in s:
        rendered.append(item.strip('"') if item != '' else empty_marker)
    return ''.join(' ' + part for part in rendered)
# ---- read the grammar (Python 2 script: note the print statements) ----
# Input: semicolon-separated productions of the form  NAME : alt1 | alt2
for g in sys.stdin.read().strip().split(';'):
    pr = g.split(':',2)
    if len(pr) < 2:
        continue
    # NOTE(review): split(':', 2) can yield three parts; a second ':' in a
    # production would make this unpacking raise - confirm input format.
    k, rules = [i.strip() for i in pr]
    nt_list.append(k)
    nterm[k] = NTerm()
    nterm[k].rules = [Rule([token.strip()
                            for token in rule.strip().split()])
                      for rule in rules.split('|')]
# ---- FIRST sets ----
for k in nterm:
    used.clear()
    nterm[k].first = First(k)
    nterm[k].follow = set()
# The end-of-input marker ('') belongs to the start symbol's FOLLOW set.
nterm[nt_list[0]].follow.add('')
# ---- seed FOLLOW sets from every rule with a right-to-left sweep ----
for k in nterm:
    for rule in nterm[k].rules:
        follow = set([''])
        for token in reversed(rule.exp):
            if token not in nterm:
                # Terminal: it becomes the follow candidate for the symbols
                # to its left.
                # NOTE(review): set(token) builds a set of *characters*;
                # for multi-character terminals set([token]) looks intended.
                follow = set(token)
            else:
                nterm[token].follow |= follow
                if '' in nterm[token].first:
                    # Nullable nonterminal: its FIRST joins the follow run.
                    follow |= nterm[token].first
                else:
                    follow = nterm[token].first
# ---- propagate FOLLOW sets transitively ----
for k in nterm:
    used.clear()
    Follow(k,nterm[k].follow)
# ---- compute PREDICT sets per rule and print the analysis table ----
for k in nt_list:
    start = True
    for rule in nterm[k].rules:
        pred=set()
        for token in rule.exp:
            if '' in pred:
                pred.remove('')
            if token in nterm:
                pred |= nterm[token].first
            else:
                pred.add(token)
            if '' not in pred:
                # Prefix no longer nullable: stop extending PREDICT.
                break
        if '' in pred:
            # Whole rule nullable: PREDICT also contains FOLLOW(k).
            pred.remove('')
            pred |= nterm[k].follow
        # First rule of a nonterminal prints name/FIRST/FOLLOW columns too.
        if start:
            start = False
            print '%s\t%s\t%s\t%s\t%s' % (k,toStr(rule.exp,1),toStr(sorted(nterm[k].first,tcmp),1),toStr(sorted(nterm[k].follow,tcmp),0),toStr(sorted(pred,tcmp),0))
        else:
            print '\t%s\t\t\t%s' % (toStr(rule.exp,1),toStr(sorted(pred,tcmp),0))
|
from datetime import datetime
from typing import Optional
import pytz
from sqlalchemy.orm import Session
from app import services
from app.exceptions.instance_not_found import (
CarNotFoundException,
CustomerNotFoundException,
ReservationNotFoundException,
)
from app.exceptions.reservation import StartDateNotBeforeEndDateException
from app.utils.datetime_utils import datetime_without_seconds
def validate_car_with_id_exists(db: Session, car_id: int) -> None:
    """Raise CarNotFoundException when no car matches *car_id*."""
    existing = services.car.get(db=db, _id=car_id)
    if not existing:
        raise CarNotFoundException()
def validate_customer_with_id_exists(db: Session, customer_id: int) -> None:
    """Raise CustomerNotFoundException when no customer matches *customer_id*."""
    existing = services.customer.get(db=db, _id=customer_id)
    if not existing:
        raise CustomerNotFoundException()
def validate_reservation_with_id_exists(
    db: Session, reservation_id: Optional[int] = None
) -> None:
    """Raise ReservationNotFoundException when *reservation_id* is given but
    no matching reservation exists; a falsy/absent id skips the check."""
    if not reservation_id:
        return
    if not services.reservation.get(db=db, _id=reservation_id):
        raise ReservationNotFoundException()
def validate_start_date_before_end_date(
    start_date: datetime, end_date: datetime
) -> None:
    """Raise StartDateNotBeforeEndDateException unless *start_date* is
    strictly before *end_date*."""
    if (end_date - start_date).total_seconds() <= 0:
        raise StartDateNotBeforeEndDateException()
def is_date_in_the_past(date: datetime) -> bool:
    """
    Returns True if date is in the past, False otherwise.
    The comparison is done at minute granularity against the current UTC time.
    """
    reference = datetime_without_seconds(datetime.now(tz=pytz.UTC))
    return datetime_without_seconds(date) < reference
|
# Copyright (c) 2019, CMCC Technologies Co., Ltd.
# Copyright 2019 ZTE Corporation.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import serializers
from lcm.ns.serializers.sol.lccn_filter_data import LifeCycleChangeNotificationsFilter
from lcm.ns.serializers.sol.pub_serializers import LinkSerializer
class LccnSubscriptionLinkSerializer(serializers.Serializer):
    """Links section of an LCCN subscription resource."""
    # "self" is the attribute name mandated by the SOL data model for the
    # link to this resource (it shadows nothing here: declarative field).
    self = LinkSerializer(
        help_text="URI of this resource.",
        required=True,
        allow_null=False)
class LccnSubscriptionSerializer(serializers.Serializer):
    """Representation of a lifecycle-change-notification (LCCN)
    subscription resource (ETSI NFV SOL data model)."""
    id = serializers.CharField(
        help_text="Identifier of this subscription resource.",
        required=True,
        allow_null=False)
    filter = LifeCycleChangeNotificationsFilter(
        # Fixed dropped word in the API docstring: "...define the subset of
        # all notifications..." (matches the ETSI SOL specification text).
        help_text="Filter settings for this subscription, to define the subset of all notifications this subscription relates to.",
        required=False)
    callbackUri = serializers.CharField(
        help_text="The URI of the endpoint to send the notification to.",
        required=True,
        allow_null=False)
    _links = LccnSubscriptionLinkSerializer(
        help_text="Links to resources related to this resource.",
        required=True)
class LccnSubscriptionsSerializer(serializers.ListSerializer):
    """List wrapper used when returning multiple LCCN subscriptions."""
    # Every element is serialized with the single-subscription serializer.
    child = LccnSubscriptionSerializer()
|
import numpy as np
from PIL import Image
import h5py
import random as rng
import matplotlib.pyplot as plt
from PIL import ExifTags
import scipy.misc
class Patcher():
    """Cut an image (plus a matching label mask) into fixed-size patches and
    reassemble per-patch predictions into a full-size label map.

    Attributes:
        img_arr: float image array, shape (H, W[, C]).
        lbl_arr: float label mask, shape (H, W); all ones when no labels given.
        dim: (d0, d1) patch size; stride: (s0, s1) sampling stride.
        patches / labels: caches filled by patchify().
    """

    def __init__(self, _img_arr, _lbl_arr, _dim, _stride=(4,4), _patches=None, _labels=None):
        self.img_arr = _img_arr
        # Bug fix: `_lbl_arr == None` is an *elementwise* comparison once a
        # numpy array is passed, and truth-testing the resulting array
        # raises ValueError. Identity check is the correct "no labels" test.
        if _lbl_arr is None:
            # No labels supplied: treat every pixel as positive.
            _lbl_arr = np.ones((_img_arr.shape[0], _img_arr.shape[1]))
        self.lbl_arr = _lbl_arr
        self.dim = _dim
        self.stride = _stride
        self.patches = _patches
        self.labels = _labels

    @classmethod
    def from_image(cls, _img_file, _lbl_file, _dim=(32,32), _stride=(4,4)):
        """Load image (and optional label) files; the image is downscaled by
        2x and both arrays are normalised to [0, 1].

        NOTE(review): the image is halved but the label is not resized -
        confirm label files are stored at half resolution, otherwise the
        shape assertions below will fail.
        """
        img = Image.open(_img_file)
        d0, d1 = img.size[0], img.size[1]
        img = img.resize((int(d0/2.0), int(d1/2.0)))
        img_arr = np.array(img, dtype=np.float32)/255.0
        if _lbl_file is None:  # bug fix: identity check (see __init__)
            lbl_arr = None
        else:
            lbl = Image.open(_lbl_file)
            # Only the first channel of the label image carries the mask.
            lbl_arr = np.array(lbl, dtype=np.float32)[:,:,0]/255.0
            assert img_arr.shape[0] == lbl_arr.shape[0]
            assert img_arr.shape[1] == lbl_arr.shape[1]
        return cls(img_arr, lbl_arr, _dim, _stride)

    def set_patch_dim(self, _dim):
        """Change the patch size used by subsequent create_patch calls."""
        self.dim = _dim

    def create_patch(self, pos, flatten=False, label=False):
        """Return the dim-sized window with top-left corner `pos`, shifted
        back inside the array if it would overhang an edge.

        label=True cuts from the label mask (returned as (d0, d1, 1) unless
        flattened); otherwise cuts from the image.
        """
        d0 = self.dim[0]
        d1 = self.dim[1]
        shape = self.img_arr.shape
        d00 = pos[0]
        d01 = pos[0] + d0
        if d01 > shape[0]:
            # Clamp: slide the window up so it ends at the bottom edge.
            d00 = d00 - (d01 - shape[0])
            d01 = shape[0]
        d10 = pos[1]
        d11 = pos[1] + d1
        if d11 > shape[1]:
            # Clamp: slide the window left so it ends at the right edge.
            d10 = d10 - (d11 - shape[1])
            d11 = shape[1]
        source = self.lbl_arr if label else self.img_arr
        patch = source[d00:d01, d10:d11]
        assert patch.shape[0] == d0
        assert patch.shape[1] == d1
        if flatten:
            return patch.flatten()
        if label:
            return patch.reshape((d0, d1, 1))
        return patch

    def patchify(self):
        """Compute (and cache) the training patches and label patches.

        Windows are sampled on the stride grid; a window is kept when its
        label patch contains any positive pixel, or with ~25% probability
        otherwise (to subsample the negative class).
        """
        # Bug fix: `!= None` is elementwise for numpy arrays; use identity.
        if self.patches is not None:
            return self.patches, self.labels
        self.patches = []
        self.labels = []
        shape = self.img_arr.shape
        d0 = self.dim[0]
        d1 = self.dim[1]
        s0 = self.stride[0]
        s1 = self.stride[1]
        for i0 in range(0, shape[0] - d0, s0):
            for i1 in range(0, shape[1] - d1, s1):
                label_patch = self.create_patch([i0, i1], label=True)
                if np.sum(label_patch.flatten()) > 0 or rng.randint(0, 100) < 25:
                    self.patches.append(self.create_patch([i0, i1], label=False))
                    self.labels.append(label_patch)
        return self.patches, self.labels

    def num_patches(self):
        """Number of cached patches (0 before patchify() has run)."""
        # Bug fix: patches is a Python list after patchify(), which has no
        # .shape attribute; len() works for both lists and arrays.
        return 0 if self.patches is None else len(self.patches)

    def predict(self, predictor, frac=1.0):
        """Tile the image with dim-sized windows (stride = dim * frac), run
        `predictor` on the whole batch, and sum the per-window predictions
        back into a full-size map (overlapping regions accumulate).
        """
        pred_label = np.zeros_like(self.lbl_arr)
        shape = pred_label.shape
        d0 = self.dim[0]
        d1 = self.dim[1]
        d0_stride = int(d0 * frac)
        d1_stride = int(d1 * frac)
        patches = []
        # TODO:
        # This cuts off any part of the image not aligned with d0, d1 boundaries.
        # For small enough patch dimensions this isn't a huge deal, but a
        # smarter tiling would be preferable.
        for i0 in range(0, shape[0], d0_stride):
            for i1 in range(0, shape[1], d1_stride):
                patches.append(self.create_patch([i0, i1], label=False))
        preds = predictor(np.array(patches))
        i = 0
        for i0 in range(0, shape[0], d0_stride):
            for i1 in range(0, shape[1], d1_stride):
                pred = preds[i].reshape((d0, d1))
                # Edge windows were shifted inside by create_patch, so only
                # the overhanging portion of the prediction is written back.
                if i0 + d0 > shape[0]:
                    if i1 + d1 > shape[1]:
                        pred_label[i0:, i1:] += pred[d0 - (shape[0] - i0):, d1 - (shape[1] - i1):]
                    else:
                        pred_label[i0:, i1:i1+d1] += pred[d0 - (shape[0] - i0):, :]
                elif i1 + d1 > shape[1]:
                    pred_label[i0:i0+d0, i1:] += pred[:, d1 - (shape[1] - i1):]
                else:
                    pred_label[i0:i0+d0, i1:i1+d1] += pred
                i = i + 1
        return pred_label
|
from pprint import pprint
import time
from multiln.rpccaller import RpcCaller
def btc_init_bitcoind_global(chains):
    """Create one RpcCaller per chain, following the '185X5' port
    convention where X is the chain's port_decimal digit."""
    callers = {}
    for chain_name in chains:
        spec = chains[chain_name]
        port_chain = '185%s5' % (spec['port_decimal'])
        callers[chain_name] = RpcCaller(
            '%s:%s' % (spec['host_url'], port_chain),
            'user%s' % port_chain,
            'password%s' % port_chain,
        )
    return callers
def btc_wait_deamons_start(bitcoind_map):
    """Block until every daemon answers a 'help' RPC without an error,
    polling once per second."""
    for chain_name, rpccaller in bitcoind_map.items():
        while True:
            print('Waiting bitcoind for chain %s to start' % chain_name)
            response = rpccaller.call('help', {})
            if 'error' not in response:
                break
            time.sleep(1)
def generate_blocks(rpccaller, chain_name, nblocks):
    """Mine `nblocks` blocks on one chain, paying to a fresh wallet address."""
    address = rpccaller.call('getnewaddress', {})
    rpccaller.call('generatetoaddress', {'nblocks': nblocks, 'address': address})
    print('Generated %s %s blocks:' % (nblocks, chain_name))
def btc_generate_all_chains(bitcoind_map, nblocks):
    """Mine `nblocks` blocks on every chain in the map."""
    for name, caller in bitcoind_map.items():
        generate_blocks(caller, name, nblocks)
def print_balances(bitcoind_map):
    """Dump wallet balance information for every chain."""
    for chain_name, rpccaller in bitcoind_map.items():
        for method in ('getbalance', 'getbalances'):
            print(method, chain_name, rpccaller.call(method, {}))
def btc_print_block_at_height(bitcoind_map, height):
    """Print the block hash at `height` and, for every chain, dump each
    raw transaction contained in that block."""
    for chain_name, caller in bitcoind_map.items():
        block_hash = caller.call('getblockhash', {'height': height})
        print('getblockhash', chain_name, block_hash)
        block = caller.call('getblock', {'blockhash': block_hash})
        for txid in block['tx']:
            raw_tx = caller.call('getrawtransaction',
                                 {'blockhash': block_hash, 'txid': txid, 'verbose': 1})
            print('getrawtransaction', chain_name)
            pprint(raw_tx)
|
'''
Deduplication of near duplicates
================================
Remove nears duplicates of projects from the data. Numeric
fields (such as funding) are aggregated together.
'''
import logging
import luigi
import datetime
from nesta.packages.misc_utils.batches import split_batches, put_s3_batch
from nesta.core.luigihacks.misctools import find_filepath_from_pathstub as f3p
from nesta.core.luigihacks.mysqldb import MySqlTarget
from nesta.core.luigihacks import autobatch
from nesta.core.orms.orm_utils import get_es_ids
from nesta.core.orms.orm_utils import setup_es
from nesta.core.orms.orm_utils import get_config
from nesta.core.routines.nih.nih_data.nih_abstracts_mesh_task import AbstractsMeshTask
class DedupeTask(autobatch.AutoBatchTask):
    '''Deduplicate NiH articles in Elasticsearch, batch by batch.

    Collects article IDs from the previous ("old") ES index, splits them
    into batches, uploads unfinished batches to S3 and emits one batch-job
    parameter dict per batch; batches whose IDs are already mostly present
    in the new index are marked done.
    '''
    date = luigi.DateParameter()                      # run date (part of the update id)
    routine_id = luigi.Parameter()                    # unique id of this routine run
    intermediate_bucket = luigi.Parameter()           # S3 bucket for batch files
    db_config_path = luigi.Parameter()                # path to the MySQL config file
    process_batch_size = luigi.IntParameter(default=5000)   # ids per batch
    drop_and_recreate = luigi.BoolParameter(default=False)  # recreate the ES index

    def output(self):
        '''Points to the output database engine'''
        db_config = get_config(self.db_config_path,
                               "mysqldb")
        db_config["database"] = ('dev' if self.test
                                 else 'production')
        db_config["table"] = f"{self.routine_id} <dummy>"  # Fake table
        update_id = f"NiHDedupeTask-{self.routine_id}_{self.date}"
        return MySqlTarget(update_id=update_id, **db_config)

    def requires(self):
        '''Depends on the abstracts/MeSH processing task for the same run.'''
        yield AbstractsMeshTask(date=self.date,
                                drop_and_recreate=self.drop_and_recreate,
                                _routine_id=self.routine_id,
                                db_config_path=self.db_config_path,
                                test=self.test,
                                batchable=f3p("batchables/nih/"
                                              "nih_abstract_mesh_data"),
                                env_files=[f3p("nesta/"),
                                           f3p("config/mysqldb.config"),
                                           f3p("config/elasticsearch.yaml"),
                                           f3p("nih.json")],
                                job_def=self.job_def,
                                job_name="AbstractsMeshTask-%s" % self.routine_id,
                                job_queue=self.job_queue,
                                region_name=self.region_name,
                                poll_time=self.poll_time,
                                memory=self.memory,
                                max_live_jobs=50)

    def prepare(self):
        '''Build and return the per-batch job parameter dicts.'''
        if self.test:
            # Smaller batches keep test runs short.
            self.process_batch_size = 1000
            logging.warning("Batch size restricted to "
                            f"{self.process_batch_size}"
                            " while in test mode")
        # ES setup: _old_config points at the current index, es_config at
        # the (possibly recreated, version-incremented) new index.
        es_kwargs = dict(endpoint='health-scanner',
                         dataset='nih', production=not self.test)
        _, _old_config = setup_es(**es_kwargs)
        es, es_config = setup_es(drop_and_recreate=self.drop_and_recreate,
                                 increment_version=True, **es_kwargs)
        # Count articles from the old index
        logging.info(f"Collected article IDs...")
        _ids = get_es_ids(es, _old_config, size=10000)
        logging.info(f"Collected {len(_ids)} IDs")
        done_ids = get_es_ids(es, es_config, size=10000)
        # Generate the job params
        job_params = []
        batches = split_batches(_ids, self.process_batch_size)
        for count, batch in enumerate(batches, 1):
            # Magical '0.3' is the lower end of the deduplication
            # fraction found by inspection: a batch counts as done when
            # over 30% of its ids already exist in the new index.
            done = sum(_id in done_ids
                       for _id in batch) / len(batch) > 0.3
            # write batch of ids to s3
            batch_file = ''
            if not done:
                batch_file = put_s3_batch(batch,
                                          self.intermediate_bucket,
                                          self.routine_id)
            params = {
                "batch_file": batch_file,
                "config": 'mysqldb.config',
                "bucket": self.intermediate_bucket,
                "done": done,
                'outinfo': es_config['host'],
                'out_port': es_config['port'],
                'out_index': es_config['index'],
                'in_index': _old_config['index'],
                'out_type': es_config['type'],
                'aws_auth_region': es_config['region'],
                'entity_type': 'paper',
                'test': self.test,
                'routine_id': self.routine_id
            }
            job_params.append(params)
            if self.test and count > 1:
                logging.warning("Breaking after 2 batches "
                                "while in test mode.")
                logging.warning(job_params)
                break
        logging.info("Batch preparation completed, "
                     f"with {len(job_params)} batches")
        return job_params

    def combine(self, job_params):
        '''Touch the checkpoint'''
        self.output().touch()
|
import unittest
import tempfile
import os
from habitipy.api import parse_apidoc, ApiEndpoint
# Raw apidoc text defining three well-formed webhook endpoints; used by the
# happy-path parsing tests below.  (Typos like "Sring" are part of the
# upstream fixture data and must not be "fixed".)
test_data = """
@api {post} /api/v3/user/webhook Create a new webhook - BETA
@apiParam (Body) {UUID} [id="Randomly Generated UUID"] The webhook's id
@apiParam (Body) {String} url The webhook's URL
@apiParam (Body) {String} [label] A label to remind you what this webhook does
@apiParam (Body) {Boolean} [enabled=true] If the webhook should be enabled
@apiParam (Body) {Sring="taskActivity","groupChatReceived"} [type="taskActivity"] The webhook's type.
@apiParam (Body) {Object} [options] The webhook's options. Wil differ depending on type. Required for `groupChatReceived` type. If a webhook supports options, the default values are displayed in the examples below
@apiSuccess (201) {Object} data The created webhook
@apiSuccess (201) {UUID} data.id The uuid of the webhook
@apiSuccess (201) {String} data.url The url of the webhook
@apiSuccess (201) {String} data.label A label for you to keep track of what this webhooks is for
@apiSuccess (201) {Boolean} data.enabled Whether the webhook should be sent
@apiSuccess (201) {String} data.type The type of the webhook
@apiSuccess (201) {Object} data.options The options for the webhook (See examples)
@api {put} /api/v3/user/webhook/:id Edit a webhook - BETA
@apiParam (Path) {UUID} id URL parameter - The id of the webhook to update
@apiParam (Body) {String} [url] The webhook's URL
@apiParam (Body) {String} [label] A label to remind you what this webhook does
@apiParam (Body) {Boolean} [enabled] If the webhook should be enabled
@apiParam (Body) {Sring="taskActivity","groupChatReceived"} [type] The webhook's type.
@apiParam (Body) {Object} [options] The webhook's options. Wil differ depending on type. The options are enumerated in the [add webhook examples](#api-Webhook-UserAddWebhook).
@apiSuccess {Object} data The updated webhook
@apiSuccess {UUID} data.id The uuid of the webhook
@apiSuccess {String} data.url The url of the webhook
@apiSuccess {String} data.label A label for you to keep track of what this webhooks is for
@apiSuccess {Boolean} data.enabled Whether the webhook should be sent
@apiSuccess {String} data.type The type of the webhook
@apiSuccess {Object} data.options The options for the webhook (See webhook add examples)
@api {delete} /api/v3/user/webhook/:id Delete a webhook - BETA
@apiParam (Path) {UUID} id The id of the webhook to delete
"""
# Malformed apidoc samples: each mixes a (201)-scoped @apiSuccess with an
# unscoped one inside a single endpoint, which parse_apidoc must reject.
wrong_apidoc_data = [
    """@api {delete} /api/v3/user/webhook/:id Delete a webhook - BETA
@apiParam (Path) {UUID} id The id of the webhook to delete
@apiSuccess (201) {String} type The type of the webhook
@apiSuccess {Object} options The options for the webhook (See webhook add examples)
""",
    """@api {delete} /api/v3/user/webhook/:id Delete a webhook - BETA
@apiParam (Path) {UUID} id The id of the webhook to delete
@apiSuccess {String} type The type of the webhook
@apiSuccess (201) {Object} options The options for the webhook (See webhook add examples)
""",
]
# pylint: disable=missing-docstring
# Attribute names checked on each parsed ApiEndpoint, and the values
# expected for the three endpoints defined in test_data (same order).
endpoint_attrs = ('method', 'uri', 'title')
expected_endpoints = [
    ('post', '/api/v3/user/webhook', 'Create a new webhook - BETA'),
    ('put', '/api/v3/user/webhook/:id', 'Edit a webhook - BETA'),
    ('delete', '/api/v3/user/webhook/:id', 'Delete a webhook - BETA')
]
class TestParse(unittest.TestCase):
    """parse_apidoc() on a temporary apidoc file: happy path and rejects."""

    def setUp(self):
        # delete=False so the file can be reopened by parse_apidoc
        # after being closed (required on Windows).
        self.file = tempfile.NamedTemporaryFile(delete=False)

    def tearDown(self):
        os.remove(self.file.name)

    def _write(self, text):
        """Write apidoc text to the temp file and close it for reading."""
        self.file.write(text.encode('utf-8'))
        self.file.close()

    def test_read(self):
        self._write(test_data)
        ret = parse_apidoc(self.file.name)
        self.assertEqual(len(ret), 3)

    def test_wrong_apidoc0(self):
        self._write(wrong_apidoc_data[0])
        with self.assertRaises(ValueError):
            parse_apidoc(self.file.name)

    def test_wrong_apidoc1(self):
        self._write(wrong_apidoc_data[1])
        with self.assertRaises(ValueError):
            parse_apidoc(self.file.name)
class TestParsedEndpoints(unittest.TestCase):
    """Attribute checks on the endpoints parsed from test_data."""

    def setUp(self):
        self.file = tempfile.NamedTemporaryFile(delete=False)
        self.file.write(test_data.encode('utf-8'))
        self.file.close()
        self.ret = parse_apidoc(self.file.name)
        os.remove(self.file.name)

    def test_read(self):
        # A plain loop instead of a side-effect list comprehension.
        for endpoint in self.ret:
            self.assertIsInstance(endpoint, ApiEndpoint)
        for expected_values, obj in zip(expected_endpoints, self.ret):
            for attr, expected in zip(endpoint_attrs, expected_values):
                self.assertEqual(getattr(obj, attr), expected)

    def test_retcodes(self):
        # First endpoint documents (201) success; the others default to 200.
        for retcode, obj in zip([201, 200, 200], self.ret):
            self.assertEqual(obj.retcode, retcode)
|
import math
import logging
import glob
import numpy as np
from sparse_ct.reconstructor_2d import (
SartReconstructor,
SartBM3DReconstructor,
DgrReconstructor,
N2SelfReconstructor)
from . import image_to_sparse_sinogram
def get_images(path):
    """Return every .jpg and .png file directly inside `path` (jpg first)."""
    jpgs = glob.glob(path + '/*.jpg')
    pngs = glob.glob(path + '/*.png')
    return jpgs + pngs
def benchmark(
        images_path,
        recon,
        theta,
        noise_pow
    ):
    """Evaluate one reconstructor over every image in a directory.

    Per-image MSE/PSNR/SSIM plus mean/std summaries are appended to
    benchmark/benchmark_<recon.name>.log.

    Args:
        images_path: directory scanned for .png/.jpg test images.
        recon: reconstructor instance (provides name/calc/eval, and
            set_for_metric for DGR).
        theta: projection angles; only len(theta) is used when simulating
            the sparse sinograms.
        noise_pow: noise power passed to the sinogram simulator.
    """
    images=get_images(images_path)
    log_filename = 'benchmark/benchmark_{recon}'.format(
        recon=recon.name
    )
    logging.basicConfig(
        filename='{}.log'.format(log_filename),
        filemode='a',
        format='%(asctime)s - %(levelname)s - %(message)s',
        level=logging.INFO
    )
    logging.warning('Starting')
    logging.warning('images: %s', images_path)
    logging.warning('n_proj: %s', len(theta))
    logging.warning('noise_pow: %s', noise_pow)
    mse_list = []
    psnr_list = []
    ssim_list = []
    for fname in images:
        # NOTE(review): `theta` is re-bound by the simulator's return value
        # on each iteration - confirm it matches the incoming angle count.
        gt, sinogram, theta, FOCUS = image_to_sparse_sinogram(fname,
            channel=1, n_proj=len(theta), size=512,
            angle1=0.0, angle2=180.0, noise_pow=noise_pow)
        # set metrics
        if type(recon) == DgrReconstructor:# or type(recon) == N2SelfReconstructor:
            # DGR needs a SART initial image and the ground truth to
            # compute its metrics during optimisation.
            recon_bm3d = SartReconstructor('SART',
                sart_n_iter=40, sart_relaxation=0.15)
            img_sart_bm3d = recon_bm3d.calc(sinogram, theta)
            recon.set_for_metric(gt, img_sart_bm3d, FOCUS=FOCUS, log_dir='../log/dip')
        recon.calc(sinogram, theta)
        mse, psnr, ssim = recon.eval(gt)
        mse_list.append(mse)
        psnr_list.append(psnr)
        ssim_list.append(ssim)
        logstr = "{}: MSE:{:.5f} PSNR:{:.5f} SSIM:{:.5f}".format(
            fname, mse, psnr, ssim
        )
        logging.info(logstr)
    # Aggregate statistics over all processed images.
    logging.info('Avg: MSE:{:.5f} PSNR:{:.5f} SSIM:{:.5f}'.format(
        np.mean(mse_list),
        np.mean(psnr_list),
        np.mean(ssim_list)
    ))
    logging.info('Std: MSE:{:.5f} PSNR:{:.5f} SSIM:{:.5f}'.format(
        np.std(mse_list),
        np.std(psnr_list),
        np.std(ssim_list)
    ))
    logging.warning('Done.')
    # Machine-parsable one-line summary for collation across runs.
    logging.critical('Summary;{};{};{};{};{:.2f};{:.2f};{:.4f};{:.4f}'.format(
        recon.name,
        images_path,
        len(theta),
        noise_pow,
        np.mean(psnr_list),
        np.std(psnr_list),
        np.mean(ssim_list),
        np.std(ssim_list)
    ))
# No CLI entry point: benchmark() is invoked from separate driver scripts.
if __name__ == "__main__":
    pass
|
import numpy as np
from tqdm import tqdm
import sys
import os
import re
from mpl_toolkits.mplot3d import axes3d
lib_path = os.getcwd()
sys.path.append(f'{lib_path}')
class MyAxes3D(axes3d.Axes3D):
    """Axes3D wrapper that redraws the z axis on a chosen side ('l'/'r').

    NOTE(review): relies on private matplotlib internals (w_zaxis, _PLANES,
    _draw_grid) that have changed across matplotlib releases - re-verify
    against the pinned matplotlib version before upgrading.
    """

    def __init__(self, baseObject, sides_to_draw):
        # Re-class this instance as a subclass of the wrapped axes' class
        # and adopt its state, so the wrapper can replace an axes object
        # that already lives in a figure.
        self.__class__ = type(baseObject.__class__.__name__,
                              (self.__class__, baseObject.__class__),
                              {})
        self.__dict__ = baseObject.__dict__
        self.sides_to_draw = list(sides_to_draw)
        self.mouse_init()

    def set_some_features_visibility(self, visible):
        # Toggle z-axis ticks, tick labels, axis line, pane and label at once.
        for t in self.w_zaxis.get_ticklines() + self.w_zaxis.get_ticklabels():
            t.set_visible(visible)
        self.w_zaxis.line.set_visible(visible)
        self.w_zaxis.pane.set_visible(visible)
        self.w_zaxis.label.set_visible(visible)

    def draw(self, renderer):
        """Draw the axes normally (z features hidden), then redraw the
        z axis on the requested side(s) by permuting its plane order."""
        # set visibility of some features False
        self.set_some_features_visibility(False)
        # draw the axes
        super(MyAxes3D, self).draw(renderer)
        # set visibility of some features True.
        # This could be adapted to set your features to desired visibility,
        # e.g. storing the previous values and restoring the values
        self.set_some_features_visibility(True)
        zaxis = self.zaxis
        draw_grid_old = zaxis.axes._draw_grid
        # disable draw grid
        zaxis.axes._draw_grid = False
        tmp_planes = zaxis._PLANES
        if 'l' in self.sides_to_draw :
            # draw zaxis on the left side
            zaxis._PLANES = (tmp_planes[2], tmp_planes[3],
                             tmp_planes[0], tmp_planes[1],
                             tmp_planes[4], tmp_planes[5])
            zaxis.draw(renderer)
        if 'r' in self.sides_to_draw :
            # draw zaxis on the right side
            zaxis._PLANES = (tmp_planes[3], tmp_planes[2],
                             tmp_planes[1], tmp_planes[0],
                             tmp_planes[4], tmp_planes[5])
            zaxis.draw(renderer)
        zaxis._PLANES = tmp_planes
        # restore the original grid-drawing flag
        zaxis.axes._draw_grid = draw_grid_old
def layer_pca(hidden_stacked, **kwargs):
    """PCA-project stacked hidden states, one SVD per layer.

    Args:
        hidden_stacked: tensor of shape (layers, samples, features).
        n_pc (kwarg, optional): number of principal components to keep;
            when omitted, all left singular vectors are kept.

    Returns:
        (hs_proj, singular_values): per-layer left singular vectors
        (the PCA projections) and the per-layer singular values.
    """
    import torch
    h_dim = hidden_stacked.shape
    if 'n_pc' in kwargs:
        n_pc = kwargs.get('n_pc')
        hs_proj = torch.zeros((h_dim[0], h_dim[1], n_pc))
    else:
        hs_proj = torch.zeros_like(hidden_stacked)
    singular_values = torch.zeros((h_dim[0], min(h_dim[1], h_dim[2])))
    for l in tqdm(range(h_dim[0])):
        hidden = hidden_stacked[l, :].detach().numpy()
        # Centre the features so the SVD is a true PCA.
        hidden = hidden - hidden.mean(0, keepdims=True)
        u, s, v = np.linalg.svd(hidden, full_matrices=False)
        if 'n_pc' in kwargs:
            # Fixed: slice by n_pc rather than the hard-coded ':3' (identical
            # for the existing n_pc=3 call sites, correct for other values).
            hs_proj[l, :] = torch.tensor(u[:, :n_pc])
        else:
            hs_proj[l, :] = torch.tensor(u)
        singular_values[l, :] = torch.tensor(s)
    return hs_proj, singular_values
def gcircle_prop(N, L, N_thetas, alpha100, g100, *args):
    """Propagate a great circle of N_thetas points through an L-layer
    random net with heavy-tailed (alpha-stable) weights.

    alpha100/g100 are 100x integer encodings of alpha and g, convenient
    for CLI/job-array arguments.

    Returns:
        torch tensor of shape (L + 1, N_thetas, N) holding the hidden
        states at every depth; index 0 is the input circle itself.
    """
    import torch
    from scipy.stats import levy_stable
    # Extract numeric arguments (they may arrive as strings from the CLI).
    N = int(N)
    L = int(L)
    N_thetas = int(N_thetas)
    alpha = int(alpha100)/100.
    g = int(g100)/100.
    # Operate at a fixed point of the dynamics. nporch.theory.q_star(alpha, g)
    # would give the exact value; 1 is used in practice, so the (unused)
    # nporch import has been removed.
    q_fixed = 1
    # Great circle embedded in the first two coordinates of N-dim space.
    hs = np.zeros([N, N_thetas])
    thetas = np.linspace(0, 2*np.pi, N_thetas)
    hs[0,:] = q_fixed * np.cos(thetas)
    hs[1,:] = q_fixed * np.sin(thetas)
    hs_all = np.zeros([L + 1, N_thetas, N])
    hs_all[0,:,:] = hs.T
    for l in tqdm(range(L)):
        # Fresh alpha-stable weight matrix each layer, scaled so the
        # activations stay O(1) as N grows.
        weights = levy_stable.rvs(alpha, 0, size=[N, N],
                                  scale=g*(0.5/N)**(1./alpha))
        hs = np.dot(weights, np.tanh(hs))
        hs_all[l + 1,:,:] = hs.T
    return torch.tensor(hs_all)  # convert to torch
def gcircle_save(N, L, N_thetas,
                 alpha100, g100, *args):
    """Run gcircle_prop and persist the result under the shared data folder."""
    import torch
    path = "/project/phys_DL/Anomalous-diffusion-dynamics-of-SGD/geometry_data/gcircle3d_data"
    if not os.path.exists(path):
        os.makedirs(path)
    propagated = gcircle_prop(N, L, N_thetas, alpha100, g100)
    torch.save(propagated,
               f"{path}/gcircles_alpha_{alpha100}_g_{g100}")
def submit(*args):
    """Queue gcircle propagation jobs over the chosen (alpha100, g100)
    grid via the qsub helper, re-invoking this script for each pair."""
    from qsub import qsub
    alpha_values = [100, 150, 200]
    g_values = [300]
    pbs_array_data = [(alpha100, g100)
                      for alpha100 in alpha_values
                      for g100 in g_values]
    qsub(f'python {sys.argv[0]} {" ".join(args)}',
         pbs_array_data,
         path='/project/phys_DL/Anomalous-diffusion-dynamics-of-SGD/geometry_data/',
         P='phys_DL')
# ----- preplot -----
def gcircle_preplot(alpha100, g100, *args):
    """Load propagated great-circle data and cache its 3-component PCA
    projection plus singular values for later plotting."""
    import torch
    path = "/project/phys_DL/Anomalous-diffusion-dynamics-of-SGD/geometry_data/gcircle3d_data"
    if not os.path.exists(path):
        os.makedirs(path)
    hs_all = torch.load(f"{path}/gcircles_alpha_{alpha100}_g_{g100}")
    hs_proj, singular_values = layer_pca(hs_all, n_pc=3)
    torch.save(hs_proj, f"{path}/eigvecs_alpha_{alpha100}_g_{g100}")
    torch.save(singular_values, f"{path}/singvals_alpha_{alpha100}_g_{g100}")
    print("Preplot done!")
def submit_preplot(*args):
    """Queue gcircle_preplot jobs for the hard-coded (alpha100, g100) pairs."""
    from qsub import qsub
    # Alternatively the pairs could be scraped from the data folder's
    # file names; the grid is currently fixed instead.
    pbs_array_data = [(100, 300), (150, 300), (200, 300)]
    print(pbs_array_data)
    qsub(f'python {sys.argv[0]} gcircle_preplot', pbs_array_data,
         path='/project/phys_DL/Anomalous-diffusion-dynamics-of-SGD/geometry_data/',
         P='phys_DL')
# ----- plot -----
def gcircle_plot(*args):
    """Render, per alpha, a 1x3 grid of 3D PCA projections of the
    propagated great circle (one panel per g value), with an inset bar
    chart of the top-5 singular-value fractions and a shared colorbar
    over the circle angle theta.  Saves one PDF per alpha.

    NOTE(review): `mpl_toolkits.axes_grid` was removed in modern
    matplotlib; `axes_grid1` (already imported below for mark_inset)
    provides the same inset_axes - confirm the pinned matplotlib version.
    """
    alpha100_ls, g100_ls = [100,150,200], [25,100,300]
    from time import time
    t0 = time()
    import torch
    from matplotlib import pyplot as plt
    from matplotlib.pyplot import figure
    from matplotlib.gridspec import GridSpec
    from matplotlib.pyplot import subplot, title, axis, xlim, ylim, gca, xticks, yticks, xlabel, ylabel, plot, legend, gcf, cm  # colorbar
    from mpl_toolkits.axes_grid.inset_locator import inset_axes
    from mpl_toolkits.axes_grid1.inset_locator import mark_inset
    from matplotlib.cm import coolwarm
    # Cyclic colormap so theta = 0 and theta = 2*pi share one colour.
    cm = cm.get_cmap('twilight')
    tick_size = 18.5
    label_size = 18.5
    axis_size = 18.5
    legend_size = 14
    linewidth = 0.8
    text_size = 14
    data_path = "/project/phys_DL/Anomalous-diffusion-dynamics-of-SGD/geometry_data/gcircle3d_data"
    plot_path = "/project/phys_DL/Anomalous-diffusion-dynamics-of-SGD/figure_ms"
    # Cartesian product of alphas and gains, flattened row-major.
    alpha_mult_pair = []
    for alpha100 in alpha100_ls:
        for g100 in g100_ls:
            alpha_mult_pair.append((alpha100,g100))
    # Font sizes (these override the values set above).
    tick_size = 16.5
    label_size = 16.5
    axis_size = 16.5
    legend_size = 14
    linewidth = 0.8
    # One row of three panels; a single network layer (index 30) is shown.
    rows = 1
    cols = 3
    layer_ii = [30]
    # Each entry of ls_all selects the three consecutive (alpha, g) pairs
    # belonging to one alpha, i.e. one figure per entry.
    ls_all = [list(range(3)), list(range(3,6)), list(range(6,9))]
    # thetas
    N_thetas = 1000
    thetas = np.linspace(0, 2*np.pi, N_thetas)
    vert_dists = [0.78, 0.5, 0.22]
    for ls in ls_all:
        fig = plt.figure(figsize=(9.5,7.142/3 + 0.75))
        """
        mins = []
        maxs = []
        for f_ii in range(len(ls)):
            folder_ii = ls[f_ii]
            alpha100, g100 = alpha_mult_pair[folder_ii]
            print((alpha100,g100))
            alpha, m = alpha100/100, g100/100
            hs_proj = torch.load(f"{data_path}/eigvecs_alpha_{alpha100}_g_{g100}")
            singular_values = torch.load(f"{data_path}/singvals_alpha_{alpha100}_g_{g100}")
            for k in range(len(layer_ii)):
                ii = layer_ii[k]
                z = hs_proj[ii, :, 2]
                mins.append(min(z))
                maxs.append(max(z))
        """
        # Colour range spans the full circle angle.
        cmap_bd = [0, 2*np.pi]
        for f_ii in range(len(ls)):
            folder_ii = ls[f_ii]
            alpha100, g100 = alpha_mult_pair[folder_ii]
            print((alpha100,g100))
            alpha, m = alpha100/100, g100/100
            hs_proj = torch.load(f"{data_path}/eigvecs_alpha_{alpha100}_g_{g100}")
            singular_values = torch.load(f"{data_path}/singvals_alpha_{alpha100}_g_{g100}")
            for k in range(len(layer_ii)):
                # Panel index within the figure.
                fig_ii = rows*f_ii + k + 1
                print(fig_ii)
                ax = fig.add_subplot(rows,cols,fig_ii,projection='3d')
                ii = layer_ii[k]
                total_var = sum(singular_values[ii,:])
                print(f"Total variance {total_var}")
                print(f"Top singular value: {singular_values[ii, 0]}")
                x, y ,z = hs_proj[ii, :, 0], hs_proj[ii, :, 1],hs_proj[ii, :, 2]
                # Scatter coloured by the circle angle theta.
                im = ax.scatter(x , y , z, c=thetas, vmin=cmap_bd[0], vmax=cmap_bd[1], marker='.', s=4, cmap=cm)
                ax.zaxis.set_rotate_label(False)
                ax.set_title(r"$D_w^{1/\alpha}$ = " + f"{m}", loc='left', fontsize=label_size - 3)
                if fig_ii == 2:
                    # Row label placed beside the middle panel.
                    fig.text(0.07, vert_dists[f_ii], "Layer {fname}".format(fname=layer_ii[k]), rotation=90, va='center', fontsize=label_size - 3)
                # keep zaxis on the chosen side (https://www.py4u.net/discuss/13794)
                ax = fig.add_axes(MyAxes3D(ax, 'l'))
                # Symmetric axis limits around the origin.
                pmax = 0.05
                ax.set_xlim(-pmax, pmax); ax.set_ylim(-pmax, pmax);
                ax.set_zlim(-pmax, pmax);
                # tick labels hidden on all axes
                ax.set_xticklabels([]); ax.set_yticklabels([])
                ax.set_zticklabels([]);
                # Inset: top-5 singular values as a fraction of total variance.
                borderpad = -.5
                ins = inset_axes(ax, width="30%", height="30%", borderpad=borderpad)
                ins.bar(list(range(1,6)),singular_values[ii, 0:5]/total_var, color='k')
                print(singular_values[ii, 0:5]/total_var)
                ins.axhline(y=0.5, color='k', linestyle='-', linewidth=0.5, alpha=0.3)
                ins.set_yticks([0, 0.5, 1.0])
                if fig_ii != 1:
                    ins.set_yticklabels([])
                else:
                    ins.set_yticklabels([0, 0.5, 1.0])
                    ins.set_xticklabels([])
                ins.set_xticks(list(range(1,6)))
                ins.set_ylim(0,1)
        # Shared colorbar over theta for the whole figure.
        fig.subplots_adjust(right=0.8)
        cbar_ax = fig.add_axes([0.85, 0.20, 0.015, 0.75])
        cbar_ticks = [0, np.pi, 2*np.pi]
        cbar = fig.colorbar(im, cax=cbar_ax, ticks=cbar_ticks)
        cbar.ax.set_yticklabels(['0', r'$\pi$', r'$2\pi$'])
        # suptitle as alpha
        fig.suptitle(rf"$\alpha$ = {alpha}", fontsize=label_size)
        plt.savefig(f"{plot_path}/proj3d_single_alpha={alpha}.pdf", bbox_inches='tight')
        print(f"alpha={alpha} done!")
        plt.clf()
    print(f"{time() - t0} s!")
if __name__ == '__main__':
    # CLI dispatcher: first argument names a function in this module,
    # the remaining arguments are passed to it as strings.
    import sys
    if len(sys.argv) < 2:
        print('Usage: python %s FUNCTION_NAME ARG1 ... ARGN' % sys.argv[0])
        sys.exit(1)  # sys.exit instead of quit(): quit() is for the REPL only
    func = globals().get(sys.argv[1])
    if not callable(func):
        # Fail with a clear message instead of a raw KeyError/TypeError.
        print('Unknown function: %s' % sys.argv[1])
        sys.exit(1)
    result = func(*sys.argv[2:])
|
from stock.models import StockInfo, CapitalStockAmountHistory, TradeRecord, IndexRecord
from django.db.models import Max
# from stock.controllers import send_email
def get_covariance(x, y):
    """Return the sample covariance of sequences x and y, rounded to 2 decimals.

    Uses the (n - 1) denominator (unbiased sample covariance).
    """
    mean_x = sum(x) / len(x)
    mean_y = sum(y) / len(y)
    total = sum((a - mean_x) * (b - mean_y) for a, b in zip(x, y))
    return round(total / (len(x) - 1), 2)
def get_standard_deviation(x):
    """Return the sample standard deviation of x, rounded to 2 decimals.

    Uses the (n - 1) denominator (unbiased sample variance).
    """
    mean_x = sum(x) / len(x)
    variance = sum((v - mean_x) ** 2 for v in x) / (len(x) - 1)
    return round(variance ** 0.5, 2)
def get_unarg_parameter(x, y):
    """Fit a simple linear regression y = b0 + b1 * x by least squares.

    Returns (b1, b0) — slope then intercept — each rounded to 4 decimals.
    """
    mean_x = sum(x) / len(x)
    mean_y = sum(y) / len(y)
    # Centered cross-product and sum of squares of x.
    s_xy = sum((a - mean_x) * (b - mean_y) for a, b in zip(x, y))
    s_xx = sum((a - mean_x) ** 2 for a in x)
    slope = s_xy / s_xx
    intercept = mean_y - slope * mean_x
    return round(slope, 4), round(intercept, 4)
def get_capital_by_date(symbol, date, close_price=1):
    """Market value (price * share amount) of `symbol` on the last trading
    day on or before `date`.

    close_price: truthy -> use the close price, falsy -> use the open price.
    Returns 0 when no price record exists or any lookup fails.
    """
    try:
        # Most recent share-amount record on or before the date; fall back
        # to the equity stored on StockInfo when there is no history row.
        history = CapitalStockAmountHistory.objects.filter(
            code=symbol, change_date__lte=date).order_by('-change_date').first()
        if history:
            shares = history.num
        else:
            shares = StockInfo.objects.get(code=symbol).equity
        # Most recent trade record on or before the date.
        record = TradeRecord.objects.filter(
            code=symbol, date__lte=date).order_by('-date').first()
        price = 0
        if record:
            price = record.close_price if close_price else record.open_price
        return price * shares
    except Exception as e:
        # Best-effort lookup: log and treat any failure as "no value".
        print(e)
        return 0
def get_increase_by_block():
    """Build a markdown report of per-block market-cap change between the
    two most recent trading dates, plus the Shanghai Composite index move.

    Returns the report string (email delivery is currently commented out).
    """
    blocks = StockInfo.objects.exclude(block='').values_list('block').distinct()
    # Latest trading date and the trading date just before it.
    max_date = TradeRecord.objects.aggregate(Max('date'))
    max2_date = TradeRecord.objects.exclude(date=max_date['date__max']).aggregate(Max('date'))
    max_date = max_date['date__max']
    max2_date = max2_date['date__max']
    block_dict = {}
    for block in blocks:
        block = block[0]  # values_list without flat=True yields 1-tuples
        block_stocks = StockInfo.objects.filter(status__in=['正常', '停牌'], block=block)
        # Total market value of the block on the latest date...
        sum_block = 0.0
        for one in block_stocks:
            capital = get_capital_by_date(one.code, max_date)
            sum_block += capital
        # ...and on the previous date; when a stock has no value there,
        # fall back to the latest date's open price (close_price=0).
        sum2_block = 0.0
        for one in block_stocks:
            capital = get_capital_by_date(one.code, max2_date)
            if not capital:
                capital = get_capital_by_date(one.code, max_date, 0)
            sum2_block += capital
        percent = round(sum_block / sum2_block, 6) if sum2_block else 0
        block_dict[block] = percent
    # Shanghai Composite index change over the same two dates.
    shang_index = IndexRecord.objects.filter(name='上证指数', date=max_date).first()
    shang_index2 = IndexRecord.objects.filter(name='上证指数', date=max2_date).first()
    if shang_index and shang_index2:
        index = round((shang_index.close_index / shang_index2.close_index - 1) * 100, 4)
    else:
        index = 0
    # Convert value ratios into percentage change; ratios <= 0.5 are
    # treated as bad data and dropped.
    block_dict = {k: round(100 * (v - 1), 2) for k, v in block_dict.items() if v > 0.5}
    sorted_dict = sorted(block_dict.items(), key=lambda item: -1 * item[1])
    top_dict = sorted_dict[:5]
    low_dict = sorted_dict[-5:]
    top_dict = [(one[0], str(one[1]) + '%') for one in top_dict]
    low_dict = [(one[0], str(one[1]) + '%') for one in low_dict]
    str_all = "## Shang Index: " + str(index) + '%\n'
    str_all += "## Top5: \n"
    for one in top_dict:
        str_all += "### " + str(one[0]) + ": " + str(one[1]) + '\n'
    str_all += "## Low5: \n"
    for one in low_dict:
        str_all += "### " + str(one[0]) + ": " + str(one[1]) + '\n'
    # Collect the member stocks (name, code) of each top/low block.
    top_stocks = {}
    low_stocks = {}
    for one in top_dict:
        stock_list = StockInfo.objects.filter(
            status__in=['正常', '停牌'], block=one[0]).values_list('name', 'code')
        top_stocks[one[0]] = stock_list
    for one in low_dict:
        stock_list = StockInfo.objects.filter(
            status__in=['正常', '停牌'], block=one[0]).values_list('name', 'code')
        low_stocks[one[0]] = stock_list
    # NOTE(review): the inner loops below rebind `one`; this works because
    # the outer dict iteration advances its own iterator, but the shadowing
    # is easy to misread.
    for one in top_stocks:
        str_all += "### " + str(one) + ": "
        str_value = []
        for one in top_stocks[one]:
            str_value .append(':'.join(one))
        str_all += ' '.join(str_value) + '\n'
    for one in low_stocks:
        str_all += "### " + str(one) + ": "
        str_value = []
        for one in low_stocks[one]:
            str_value .append(':'.join(one))
        str_all += ' '.join(str_value) + '\n'
    # send_email(str_all, title='Block by Index')
    return str_all
|
import re
import hashlib
import datetime
import dateparser
import humanize
class Task:
    """A single todo item: free text plus a `meta` dict (id, created,
    updated and optionally dueDate) serialized to/from the
    `text | key:value, key:value` storage line format."""

    def __init__(self):
        self.text = None
        # id: hex sha1 of the text; created/updated: timestamps.
        self.meta = {
            'id': '',
            'created': datetime.datetime.now(),
            'updated': datetime.datetime.now()
        }

    def fillFromStoredLine(self, line):
        """Populate the task from a stored line `text | label:data, ...`.

        Date-valued labels (created/updated/dueDate) are parsed from
        '%Y-%m-%d %H:%M:%S'; a line without '|' keeps default metadata.
        """
        if '|' not in line:
            self.text = line.strip()
            self.meta['id'] = self._createId(self.text)
            return
        # rpartition keeps any '|' characters inside the task text intact.
        text, _, meta = line.rpartition('|')
        self.text = text.strip()
        for piece in meta.strip().split(','):
            label, data = piece.split(':', 1)
            data = data.strip()
            label = label.strip()
            if label == 'created' or label == 'updated' or label == 'dueDate':
                data = datetime.datetime.strptime(data, '%Y-%m-%d %H:%M:%S')
            self.meta[label] = data

    def _createId(self, text):
        """Stable task id: hex sha1 digest of the task text."""
        return hashlib.sha1(text.encode('utf-8')).hexdigest()

    def fillFromHumanLine(self, line):
        """Populate the task from a human-entered line, extracting the
        `=due-date` and `@timer` markers from the text."""
        dueDate, line = self._parseHumanDueDate(line)
        timer, line = self._parseHumanNotificationTimer(line)
        # A timer without an explicit date means "today".
        dueDate = datetime.datetime.now() if not dueDate and timer else dueDate
        self.text = re.sub(r"\s+", " ", line)
        self.meta['id'] = self._createId(self.text)
        self.meta['updated'] = datetime.datetime.now()
        if dueDate:
            hour = int(timer[0]) if timer else 0
            minute = int(timer[1]) if timer else 0
            self.meta['dueDate'] = dueDate.replace(hour=hour, minute=minute, second=0)

    def _parseHumanDueDate(self, text):
        """Return [due_date, remaining_text] for a trailing `=when` marker.

        `when` uses '_' for spaces (e.g. =next_friday) and is parsed with
        dateparser preferring future dates; [None, text] when absent.
        """
        dueDatePattern = r"=[\w-]+"
        matches = re.findall(dueDatePattern, text)
        if not matches:
            return [None, text]
        dueDate = dateparser.parse(
            matches[-1][1:].replace('_', ' '),
            settings={'PREFER_DATES_FROM': 'future'}
        )
        return [dueDate, text.replace(matches[-1], '').strip()]

    def _parseHumanNotificationTimer(self, text):
        """Return [[hour, minute], remaining_text] for a `@HH:MM` or
        `@minutes-from-now` marker; [None, text] when absent."""
        timerPattern = r"\@[\d:]+"
        matches = re.findall(timerPattern, text)
        if not matches:
            return [None, text]
        timer = matches[-1][1:]
        if ':' not in timer:
            # Bare number = minutes from now; convert to an HH:MM pair.
            dueTime = datetime.datetime.now() + datetime.timedelta(minutes=int(timer))
            timer = f"{dueTime.hour}:{dueTime.minute}"
        return [timer.split(':'), text.replace(matches[-1], '').strip()]

    def prettyPrint(self):
        """Render the task for display, re-attaching human-readable
        `=date` and zero-padded `@HH:MM` markers when a dueDate exists."""
        text = self.text
        if 'dueDate' not in self.meta:
            return text
        today = datetime.datetime.today()
        # Midnight (00:00) means "no specific time of day".
        timerIsNeeded = self.meta['dueDate'].hour != 0 or self.meta['dueDate'].minute != 0
        if today.strftime('%Y-%m-%d') != self.meta['dueDate'].strftime('%Y-%m-%d') or not timerIsNeeded:
            text += f" ={humanize.naturalday(self.meta['dueDate']).replace(' ', '-')}"
        if timerIsNeeded:
            text += f" @{0 if self.meta['dueDate'].hour < 10 else ''}{self.meta['dueDate'].hour}:{0 if self.meta['dueDate'].minute < 10 else ''}{self.meta['dueDate'].minute}"
        return text

    def __str__(self):
        """Serialize back to the storage format `text | k:v, k:v\\n`."""
        meta = []
        for index, data in self.meta.items():
            if index == 'created' or index == 'updated' or index == 'dueDate':
                data = data.strftime('%Y-%m-%d %H:%M:%S')
            meta.append((index, data))
        meta_str = ', '.join('%s:%s' % m for m in meta)
        return '%s | %s\n' % (self.text, meta_str)

    def __getattr__(self, attr):
        """Expose meta entries (and a computed `priority`) as attributes."""
        if attr in self.meta:
            return self.meta[attr]
        elif attr == 'priority':
            # Priority = length of a word-initial run of A's (AA, AAA, ...).
            match = re.search(r"\bAA+", self.text)
            return 0 if not match else len(match.group())
        elif attr not in ('id', 'dueDate', 'created', 'updated'):
            # Fixed typo in the error message ("Unknow" -> "Unknown").
            raise AttributeError('Unknown attribute: ' + attr)
        else:
            return ''

    def finish(self):
        """Mark the task as touched now.

        NOTE(review): only bumps 'updated' — no done/completed flag is
        stored; confirm whether the caller removes finished tasks.
        """
        self.meta['updated'] = datetime.datetime.now()

    def isOverdue(self):
        """True when a dueDate exists and is in the past."""
        if 'dueDate' not in self.meta:
            return False
        return datetime.datetime.now() > self.meta['dueDate']

    def schedule(self, offset):
        """Set dueDate from a human phrase (e.g. 'in 2 days'), preferring
        future interpretations."""
        self.meta['dueDate'] = dateparser.parse(offset, settings={
            'PREFER_DATES_FROM': 'future'
        })
|
#!/usr/bin/env python
# coding: utf-8
# # <center>Lab 1</center>
# ## <center> Optical Digit Recognition </center>
# 
# ### Description:
# The scope of this exercise is the implementation of __an optical digit recognition system__. Our dataset comes from __US Postal Service__, written by hand (scanned from postal envelopes), and contains digits from 0 to 9 separated in train and test set.
# ### Data:
# We are given two text files (train.txt and test.txt). Each line corresponds to a sample-digit and each column corresponds to a feature of the digit. For example, the value (i, j) is the j-th feature of the i-th digit. Every digit is described by 257 values. The first value is the class (if it is 0, 1 etc) and the rest 256 values are the pixels that describe it in grayscale.
# ### Implementation:
# First, we import all the necessary libraries and suppress some unnecessary warnings.
# In[1]:
# various
import numpy as np
from matplotlib import pyplot as plt
import random
import scipy.stats
# sklearn
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.model_selection import KFold, learning_curve, ShuffleSplit, cross_val_score, train_test_split
from sklearn.svm import SVC
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier, BaggingClassifier
# pytorch
from torch.utils.data import Dataset, DataLoader
import torch
from torch import nn
from torch import optim
# In[2]:
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# #### The first 13 steps were implemented as a part of the PrepareLab located in prepare_lab folder.
# __Step 1:__ Read input data from given text files.
# In[3]:
# Define useful variables
data_path = "./pr_lab1_2016-17_data_0/pr_lab1_2016-17_data"
train_size = 7291
test_size = 2007
n_features = 256

def _read_usps(path):
    """Read a USPS text file where each row is `label pixel_1 ... pixel_256`.

    Returns (X, y): X float64 of shape (n_samples, 256), y int64 labels.
    """
    # np.loadtxt parses whitespace-separated floats — replaces the manual
    # line-splitting loop and preallocated arrays of the original version.
    data = np.loadtxt(path, dtype=np.float64)
    # First column is the class label (stored as a float in the file).
    return data[:, 1:], data[:, 0].astype('int64')

# Read train data
X_train, y_train = _read_usps(data_path + "/train.txt")
print("Finished reading training data.")

# Read test data
X_test, y_test = _read_usps(data_path + "/test.txt")
print("Finished reading test data.")
# __Step 2:__ Display a certain sample (index 131) as an 16x16 image.
# In[4]:
# Reshape the 256 vector in a 16x16 matrix.
img_131 = np.reshape(X_train[131], (16, 16))
# Turn the axis off and display the image.
plt.axis('off')
plt.imshow(img_131)
# __Step 3:__ Display one random image from each digit.
# In[5]:
# Define a figure with 10 plots.
fig = plt.figure(figsize=(15,6))
columns = 5
rows = 2
for digit in range(10):
# Pick all images of current digit
curr_data = []
for j, y in enumerate(y_train):
if y == digit:
curr_data.append(X_train[j])
# Select randomly an image
sample = random.choice(curr_data)
# Display the randomly selected image in a subplot
fig.add_subplot(rows, columns, digit+1)
plt.axis('off')
plt.imshow(np.reshape(sample, (16, 16)))
plt.show()
# __Step 4:__ Compute the mean value of pixel (10,10) of all 0's in the train set.
# In[6]:
# Get indexes of 0's in the train set
idx_0 = [i for i in range(train_size) if y_train[i] == 0]
# Get pixel (10,10) of all 0's
X_train_0_10 = np.take(X_train[:, 10*16+10], idx_0)
# Compute mean
mean_0_10 = np.mean(X_train_0_10)
print("Mean value of pixel (10, 10) of all 0's in the train set is: " + str(mean_0_10))
# __Step 5:__ Compute variance of (10,10) pixel of all 0's in the train set
# In[7]:
var_0_10 = np.var(X_train_0_10)
print("Variance of pixel (10, 10) of all 0's in the train set is: " + str(var_0_10))
# __Step 6:__ Compute mean value and variance of every pixel of 0's in the train set
# In[8]:
# Get pixels of all 0's
X_train_0 = np.take(X_train, idx_0, axis=0)
# Compute mean value along each pixel
mean_0 = np.mean(X_train_0, axis=0, keepdims=True)
# Compute variance along each pixel
var_0 = np.var(X_train_0, axis=0, keepdims=True)
# Verify their shape
print("Shape of mean values: " + str(mean_0.shape))
print("Shape of variances: " + str(var_0.shape))
# __Step 7:__ Display digit '0' using the mean value of each pixel.
# In[9]:
plt.axis("off")
plt.imshow(np.reshape(mean_0, (16, 16)))
# __Step 8:__ Display '0' using the variance of each pixel.
# In[10]:
plt.axis("off")
plt.imshow(np.reshape(var_0, (16, 16)))
# We observe that the digit in the mean-image contains less noise than in the variance-image. However, in both images the digit can be distinguished.
# __Step 9:__
#
# __(a)__ Compute the mean value and the variance for all digits (0-9).
# In[11]:
mean = np.zeros((10, 256))
var = np.zeros((10, 256))
for digit in range(10):
idx_i = [i for i in range(train_size) if y_train[i] == digit]
X_train_i = np.take(X_train, idx_i, axis=0)
mean[digit, :] = np.mean(X_train_i, axis=0, keepdims=True)
var[digit, :] = np.var(X_train_i, axis=0, keepdims=True)
# __(b)__ Display all digits using their computed mean value.
# In[12]:
fig = plt.figure(figsize=(15,6))
columns = 5
rows = 2
for digit in range(10):
fig.add_subplot(rows, columns, digit+1)
plt.axis('off')
plt.imshow(np.reshape(mean[digit, :], (16, 16)))
plt.show()
# __Step 10:__ Classify X_test[101], using Euclidean distance.
# In[13]:
# Define a function that classifies a sample based on the
# euclidean distance.
def predict_eucl(x):
    """Return the digit (0-9) whose class-mean vector is nearest to x
    in Euclidean distance.

    Relies on the module-level `mean` array of shape (10, 256).
    """
    # Compute each distance once (the original recomputed the norm twice
    # per candidate); argmin keeps the first minimum, matching the
    # original strict-less-than scan on ties.
    dists = [np.linalg.norm(x - mean[i, :]) for i in range(10)]
    return int(np.argmin(dists))
print("Prediction: " + str(predict_eucl(X_test[101])))
print("Ground truth: " + str(y_test[101]))
# In[14]:
plt.axis('off')
plt.imshow(np.reshape(X_test[101], (16, 16)))
# We observe that the classification is wrong, since X_test[101] is the digit 6.
# __Step 11:__
#
# __(a)__ Classify test set using Euclidean distance
# In[15]:
# Compute predictions for each test sample
y_pred = np.zeros(test_size)
for i, x in enumerate(X_test):
y_pred[i] = predict_eucl(x)
# __(b)__ Compute accuracy
# In[16]:
# Count number of correct predictions and output the total accuracy.
corr = 0
for i in range(len(y_test)):
if y_test[i] == y_pred[i]:
corr += 1
acc = corr / len(y_test) * 100
print("Accuracy of Euclidean classifier in test set: " + str(acc))
# __Step 12:__ Create a scikit-learn euclidean estimator
# In[17]:
class EuclideanClassifier(BaseEstimator, ClassifierMixin):
    """Classify samples based on the distance from the mean feature value"""

    def __init__(self):
        self.X_mean_ = None    # (n_classes, n_features) per-class means
        self.classes_ = None   # sorted unique labels seen during fit

    def fit(self, X, y):
        """
        Fit the classifier by computing the mean feature vector of each
        class. self.X_mean_ becomes a numpy.ndarray of shape
        (n_classes, n_features). fit always returns self.

        Rows of X_mean_ follow self.classes_, so labels need not be the
        consecutive integers 0..n_classes-1 (the original implementation
        silently assumed they were).
        """
        X = np.asarray(X)
        y = np.asarray(y)
        self.classes_ = np.unique(y)
        self.X_mean_ = np.zeros((len(self.classes_), X.shape[1]))
        for k, c in enumerate(self.classes_):
            # Boolean mask selects all samples of class c.
            self.X_mean_[k, :] = X[y == c].mean(axis=0)
        return self

    def predict(self, X):
        """
        Predict, for each row of X, the class whose mean vector is
        nearest in Euclidean distance.
        """
        closest = np.argmin(euclidean_distances(X, self.X_mean_), axis=1)
        # Map row indices of X_mean_ back to actual class labels.
        return self.classes_[closest]

    def score(self, X, y):
        """
        Return accuracy of the predictions for X against ground truth y.
        """
        return float(np.mean(self.predict(X) == np.asarray(y)))
# __Step 13:__
#
# __(a)__ Score above euclidean classifier using 5-fold cross-validation
# In[18]:
# Define a custom scorer
def my_scorer(clf, X, y_true):
    """Scorer for cross_val_score: delegate to the estimator's own score."""
    return clf.score(X, y_true)
# Create the classifier
clf = EuclideanClassifier()
scores = cross_val_score(clf, X_train, y_train,
cv=KFold(n_splits=5, random_state=42),
scoring=my_scorer)
print("Euclidean Classifier score from 5-fold cross-validation = %f +-%f" % (np.mean(scores), np.std(scores)))
# __(b)__ Plot the decision surface of the euclidean classifier
# In[19]:
# Define a function that plots the decision surface of 2-dimensional data
def plot_clf(clf, X, y, labels):
    """Plot the decision surface of a fitted classifier over 2-D data.

    clf    : fitted estimator with a .predict method
    X      : array of shape (n_samples, 2)
    y      : integer class labels 0..9
    labels : legend label for each of the 10 classes
    """
    fig, ax = plt.subplots()
    # title for the plots
    title = ('Decision surface of Classifier')
    # Set-up grid for plotting.
    X0, X1 = X[:, 0], X[:, 1]
    x_min, x_max = X0.min() - 1, X0.max() + 1
    y_min, y_max = X1.min() - 1, X1.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, .05),
                         np.arange(y_min, y_max, .05))
    # Classify every grid point and draw the filled decision regions.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    ax.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
    # One scatter per class — replaces ten copy-pasted scatter calls;
    # colors and draw order match the original exactly.
    colors = ['blue', 'red', 'purple', 'green', 'gray',
              'orange', 'black', 'pink', 'white', 'yellow']
    for digit, color in enumerate(colors):
        ax.scatter(
            X0[y == digit], X1[y == digit],
            c=color, label=labels[digit],
            s=60, alpha=0.9, edgecolors='k')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
    ax.legend()
    plt.show()
# Since our data is 256-dimensional, we should apply a dimensionality reduction technique in order to plot them in 3D space. We choose to use PCA.
# In[20]:
# Define PCA
pca = PCA(n_components=2)
pca.fit(X_train)
# Apply PCA on train and test set
X_train_2d = pca.transform(X_train)
X_test_2d = pca.transform(X_test)
# In[21]:
# Train a classifier in th 2D data and plot the decision boundary.
clf = EuclideanClassifier()
clf.fit(X_train_2d, y_train)
plot_clf(clf, X_test_2d, y_test, [i for i in range(10)])
# The plot is a bit complex, since we have 10 classes instead of 2.
# __(c)__ Plot the learning curve of the euclidean classifier.
# In[22]:
# Function from https://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
    """Plot training and cross-validation score versus training-set size.

    estimator   : object implementing fit/predict (cloned per CV split)
    title       : chart title
    X, y        : training data and targets
    ylim        : optional (ymin, ymax) limits for the score axis
    cv          : cross-validation splitting strategy (int, splitter, or
                  iterable of train/test index arrays; sklearn semantics)
    n_jobs      : number of parallel jobs passed to learning_curve
    train_sizes : relative (floats in (0, 1]) or absolute training sizes

    Returns the matplotlib.pyplot module so the caller can adjust or
    show the figure.
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    mean_train = np.mean(train_scores, axis=1)
    std_train = np.std(train_scores, axis=1)
    mean_test = np.mean(test_scores, axis=1)
    std_test = np.std(test_scores, axis=1)
    plt.grid()
    # Shaded bands mark +/- one standard deviation across the CV folds.
    plt.fill_between(sizes, mean_train - std_train, mean_train + std_train,
                     alpha=0.1, color="r")
    plt.fill_between(sizes, mean_test - std_test, mean_test + std_test,
                     alpha=0.1, color="g")
    plt.plot(sizes, mean_train, 'o-', color="r", label="Training score")
    plt.plot(sizes, mean_test, 'o-', color="g", label="Cross-validation score")
    plt.legend(loc="best")
    return plt
# In[23]:
title = "Learning Curve of Euclidean Classifier"
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = EuclideanClassifier()
plot_learning_curve(estimator, title, X_train, y_train, (0.8, 1.01), cv=cv, n_jobs=8)
plt.show()
# #### The next steps are implemented as part of the main lab
# __Step 14:__ Compute the a-priori probabilities of each class, using the above formula:
#
# \begin{align*}
# prior(c_i) = \frac{N_i}{N}
# \end{align*}
#
# where $N_i$ is the number of the training samples that represent digit i and $N$ is the training size.
# In[24]:
prior = np.bincount(y_train.astype(int)) / train_size
for i in range(10):
print("Digit " + str(i) + ": " + str(prior[i]))
# For testing purposes
print("Sum is equal to: " + str(sum(prior)))
# __Step 15:__
#
# __(a)__ Creation of a Gaussian Naive Bayes classifier using NumPy.
# The Naive Bayes classifier is based on the above equation:
#
# \begin{align*}
# posterior = \frac{likelihood * prior}{evidence}
# \end{align*}
#
# or more formally,
#
# \begin{align*}
# P(c_i | x) = \frac{P(x | c_i) * P(c_i)}{P(x)}
# \end{align*}
#
# In practice, there is interest only in the numerator of that fraction, because the denominator does not depend on C and the values of the features $x_{i}$ are given, so that the denominator is effectively constant. The prior probabilities $P(c_i)$ can be computed as above and the likelihood $P(x | c_i)$ is taken from a normal distribution with the mean value and the variance of the corresponding pixel. After computing the above fraction, the class with the maximum posterior probability is taken. This is known as the maximum a posteriori or MAP decision rule.
#
# \begin{align*}
# y = argmax_{k \in \{0, .., 9\}} P(c_k) \prod_{i=1}^{n} P(x_i | c_k)
# \end{align*}
#
# In[25]:
class GaussianNB_np(BaseEstimator, ClassifierMixin):
    """Classify samples based on the Gaussian Naive Bayes"""

    def __init__(self):
        self.X_mean_ = None   # (n_classes, n_features) per-class means
        self.X_var_ = None    # (n_classes, n_features) per-class variances
        self.prior = None     # (n_classes,) class prior probabilities
        self.n_classes = None

    def fit(self, X, y):
        """
        Fit the classifier: per-class mean and variance of every feature
        plus the class priors.

        self.X_mean_ and self.X_var_ become numpy.ndarrays of shape
        (n_classes, n_features); self.prior a numpy.array of shape
        (n_classes). fit always returns self.

        Rows follow np.unique(y), so labels need not be the consecutive
        integers 0..n_classes-1 (the original indexed rows and np.bincount
        by raw label value, which breaks for arbitrary labels).
        """
        X = np.asarray(X)
        y = np.asarray(y)
        train_size, n_features = X.shape
        classes = np.unique(y)
        self.n_classes = len(classes)
        self.X_mean_ = np.zeros((self.n_classes, n_features))
        self.X_var_ = np.zeros((self.n_classes, n_features))
        self.prior = np.zeros(self.n_classes)
        for k, c in enumerate(classes):
            X_k = X[y == c]
            self.X_mean_[k, :] = np.mean(X_k, axis=0)
            self.X_var_[k, :] = np.var(X_k, axis=0)
            self.prior[k] = len(X_k) / train_size
        return self

    def predict(self, X, smooth=None):
        """
        Predict the class with the highest posterior probability
        (MAP rule: argmax of likelihood * prior).

        smooth : variance-smoothing constant for numerical stability
                 (default 1e-9).
        """
        like = np.zeros((self.n_classes, len(X)))
        # Define e for calculation stability (division by zero).
        e = smooth if smooth else 10**(-9)
        for i in range(self.n_classes):
            # Product of per-feature Gaussian densities (formula kept
            # identical to the original).
            # NOTE(review): a product of 256 densities can underflow to 0;
            # a log-likelihood formulation would be more robust — confirm
            # before changing, as it would alter the reported accuracies.
            like[i] = np.prod(1/(np.sqrt(2*np.pi*self.X_var_[i]+ e)) * np.exp(-0.5*((X - self.X_mean_[i])**2 / (self.X_var_[i] + e))), axis=1)
        return np.argmax(like.T * self.prior, axis=1)

    def score(self, X, y, smooth=None):
        """
        Return accuracy of predict(X, smooth) against ground truth y.
        """
        y_pred = self.predict(X, smooth)
        return float(np.mean(y_pred == np.asarray(y)))
# __(b)__ Compute the accuracy of the above classifier using different smoothing parameters.
# In[26]:
# Define a custom scorer
def my_scorer(clf, X, y_true):
    """Scorer for cross_val_score: delegate to the estimator's own score."""
    return clf.score(X, y_true)
# Define the classifier
gaussNB_np = GaussianNB_np()
gaussNB_np.fit(X_train, y_train)
print("Accuracy of custom NumPy GaussianNB classifier")
print()
# Predict using default smoothing.
print("Smoothing 1e-9: " + str(gaussNB_np.score(X_test, y_test)))
# Predict using 1e-6 smoothing.
print("Smoothing 1e-6: " + str(gaussNB_np.score(X_test, y_test, smooth=10**(-6))))
# Predict using 1e-3 smoothing.
print("Smoothing 1e-3: " + str(gaussNB_np.score(X_test, y_test, smooth=10**(-3))))
# Predict using 1 smoothing.
print("Smoothing 1: " + str(gaussNB_np.score(X_test, y_test, smooth=1)))
# __(c)__ Compare our custom implementation with scikit-learn GaussianNB implementation.
# In[27]:
# Define the sklearn classifier (default smoothing is 1e-9)
gaussNB = GaussianNB()
gaussNB.fit(X_train, y_train)
print("Accuracy of sklearn GaussianNB classifier")
print()
print(gaussNB.score(X_test, y_test))
# We observe that the accuracy between the NumPy and the sklearn Gaussian Naive Bayes classifiers are very close. However, when we are changing the smoothing factor, the NumPy implementation can perform a little better.
# __Step 16:__ Repeat Step 15, supposing that the variance of all features for all classes is equal to 1.
# In[28]:
class GaussianNB_np_var1(BaseEstimator, ClassifierMixin):
    """Gaussian Naive Bayes with every feature variance fixed to 1."""

    def __init__(self):
        self.X_mean_ = None   # (n_classes, n_features) per-class means
        self.X_var_ = None    # (n_classes, n_features), all ones
        self.prior = None     # (n_classes,) class prior probabilities
        self.n_classes = None

    def fit(self, X, y):
        """
        Fit the classifier: per-class mean of every feature plus the
        class priors; the variance of every feature is fixed at 1.

        self.X_mean_ / self.X_var_ become numpy.ndarrays of shape
        (n_classes, n_features); self.prior a numpy.array of shape
        (n_classes). fit always returns self.

        Rows follow np.unique(y), so labels need not be 0..n_classes-1
        (the original indexed rows and np.bincount by raw label value).
        """
        X = np.asarray(X)
        y = np.asarray(y)
        train_size, n_features = X.shape
        classes = np.unique(y)
        self.n_classes = len(classes)
        self.X_mean_ = np.zeros((self.n_classes, n_features))
        # Unit variance for every class/feature (the point of this model).
        self.X_var_ = np.ones((self.n_classes, n_features))
        self.prior = np.zeros(self.n_classes)
        for k, c in enumerate(classes):
            X_k = X[y == c]
            self.X_mean_[k, :] = np.mean(X_k, axis=0)
            self.prior[k] = len(X_k) / train_size
        return self

    def predict(self, X, smooth=None):
        """
        Predict the class with the highest posterior probability
        (MAP rule: argmax of likelihood * prior).

        smooth : variance-smoothing constant for numerical stability
                 (default 1e-9).
        """
        like = np.zeros((self.n_classes, len(X)))
        # Define e for calculation stability (division by zero).
        e = smooth if smooth else 10**(-9)
        for i in range(self.n_classes):
            # Formula kept identical to the original implementation.
            like[i] = np.prod(1/(np.sqrt(2*np.pi*self.X_var_[i]+ e)) * np.exp(-0.5*((X - self.X_mean_[i])**2 / (self.X_var_[i] + e))), axis=1)
        return np.argmax(like.T * self.prior, axis=1)

    def score(self, X, y, smooth=None):
        """
        Return accuracy of predict(X, smooth) against ground truth y.
        """
        y_pred = self.predict(X, smooth)
        return float(np.mean(y_pred == np.asarray(y)))
# In[29]:
# Define a custom scorer
def my_scorer(clf, X, y_true):
    """Scorer callable: delegate to the estimator's own ``score`` method."""
    accuracy = clf.score(X, y_true)
    return accuracy
# Define the classifier
# Fit the unit-variance NB and report test accuracy for several
# smoothing strengths (larger smoothing widens the Gaussians).
gaussNB_np_var1 = GaussianNB_np_var1()
gaussNB_np_var1.fit(X_train, y_train)
print("Accuracy of custom NumPy GaussianNB classifier, considering unit variance")
print()
# Predict using default smoothing.
print("Smoothing 1e-9: " + str(gaussNB_np_var1.score(X_test, y_test)))
# Predict using 1e-6 smoothing.
print("Smoothing 1e-6: " + str(gaussNB_np_var1.score(X_test, y_test, smooth=10**(-6))))
# Predict using 1e-3 smoothing.
print("Smoothing 1e-3: " + str(gaussNB_np_var1.score(X_test, y_test, smooth=10**(-3))))
# Predict using 1 smoothing.
print("Smoothing 1: " + str(gaussNB_np_var1.score(X_test, y_test, smooth=1)))
# Let's summarize all the implementations of a Gaussian Naive Bayes classifier in the above table:
#
#
#
# | Type | Variance | Smooth | Accuracy |
# | :--- | --- | --- | --- |
# | Numpy | trained | 1e-9 | 0.717 |
# | Numpy | trained | 1e-6 | 0.731 |
# | Numpy | trained | 1e-3 | 0.761 |
# | Numpy | trained | 1 | 0.302 |
# | Sklearn | trained | 1e-9 | 0.719 |
# | Numpy | 1 | 1e-9 | __0.813__ |
# | Numpy | 1 | 1e-6 | __0.813__ |
# | Numpy | 1 | 1e-3 | __0.813__ |
# | Numpy | 1 | 1 | 0.812 |
#
# As we can see, if we consider unit variance, accuracy increases by 5-10%.
# __Step 17:__ Compare the performance of Naive Bayes, Nearest Neighbors, SVM (using different kernels).
# In[30]:
# Define Nearest Neighbors classifier using 3, 5 and 7 number of neighbors.
neigh3 = KNeighborsClassifier(n_neighbors=3)
neigh3.fit(X_train, y_train)
neigh5 = KNeighborsClassifier(n_neighbors=5)
neigh5.fit(X_train, y_train)
neigh7 = KNeighborsClassifier(n_neighbors=7)
neigh7.fit(X_train, y_train)
# BUG FIX: corrected the "Neihbors" typo in the report heading.
print("Accuracy of Nearest Neighbors classifier, considering different number of neighbors")
print()
print("Neighbors = 3: " + str(neigh3.score(X_test, y_test)))
print("Neighbors = 5: " + str(neigh5.score(X_test, y_test)))
print("Neighbors = 7: " + str(neigh7.score(X_test, y_test)))
# In[31]:
# Define SVM classifier using different kernels.
svm_lin = SVC(kernel="linear", probability=True)
svm_lin.fit(X_train, y_train)
svm_rbf = SVC(kernel="rbf", probability=True)
svm_rbf.fit(X_train, y_train)
svm_poly = SVC(kernel="poly", probability=True)
svm_poly.fit(X_train, y_train)
svm_sigm = SVC(kernel="sigmoid", probability=True)
svm_sigm.fit(X_train, y_train)
print("Accuracy of SVM classifier, considering different kernels")
print()
print("Kernel = linear: " + str(svm_lin.score(X_test, y_test)))
# BUG FIX: the rbf row previously reported svm_lin's score again.
print("Kernel = rbf: " + str(svm_rbf.score(X_test, y_test)))
print("Kernel = poly: " + str(svm_poly.score(X_test, y_test)))
print("Kernel = sigmoid: " + str(svm_sigm.score(X_test, y_test)))
# Best implementations so far:
#
# | Classifier | Type | Accuracy |
# | :--- | --- | --- |
# | SVM | poly kernel | 0.947 |
# | NN | 3 neighbors | 0.945 |
# | NN | 5 neighbors | 0.945 |
# | NN | 7 neighbors | 0.942 |
# | SVM | rbf kernel | 0.926 |
# | SVM | linear kernel | 0.926 |
# | SVM | sigmoid kernel | 0.915 |
# | GaussianNB | var = 1 | 0.813 |
#
# All Nearest Neighbors implementations along with the poly SVM have almost 95% accuracy. As we expected, the Naive Bayes classifiers perform worse than the other classifiers, due to their simplicity.
# __Step 18:__ This step aims to combine different classifiers to achieve higher accuracy. This technique is known as __ensembling__. It is important that the combined classifiers misclassify different classes, e.g. classifier 1 tends to misclassify digit 3 and classifier 2 tends to misclassify digit 7 e.t.c .
# First, let's check the type of misprediction for each of the above classifiers.
# In[32]:
# Build the per-classifier confusion profile:
# misses[i, d] = number of test samples of true digit d that classifier i missed.
n_classifiers = 8
names = ["svm_poly", "neigh3", "neigh5", "neigh7", "svm_rbf", "svm_lin", "svm_sigm", "gaussNB_np_var1"]
classifiers = [svm_poly, neigh3, neigh5, neigh7, svm_rbf, svm_lin, svm_sigm, gaussNB_np_var1]
misses = np.zeros((n_classifiers, 10))
for i, clf in enumerate(classifiers):
    y_pred = clf.predict(X_test)
    for j in range(len(y_pred)):
        if y_pred[j] != y_test[j]:
            misses[i, int(y_test[j])] += 1
# In[33]:
print("Number of digits not correctly classified for each classifier:")
print()
# Iterate the paired lists directly instead of a hard-coded range(8),
# so the report stays in sync with `names`/`misses`.
for clf_name, clf_misses in zip(names, misses):
    print(clf_name)
    print(clf_misses)
    print()
# In[34]:
print("Top 3 misclassified digits: ")
print()
# For each classifier, the three digit classes with the highest miss counts.
for i in range(8):
    best_3 = misses[i].argsort()[-3:][::-1]
    # (removed a redundant double str() call on the third entry)
    print(names[i] + ": " + str(best_3[0]) + " " + str(best_3[1]) + " " + str(best_3[2]))
# In[35]:
print("Last 3 misclassified digits: ")
print()
# For each classifier, the three digit classes it misses least often.
for i in range(8):
    fewest = misses[i].argsort()[:3]
    print(names[i] + ": " + str(fewest[0]) + " " + str(fewest[1]) + " " + str(fewest[2]))
# __(a)__ Choose some previous classifier and combine them using Voting Classifier.
#
# Considering the above statistics, the combination svm_poly + neigh5 + svm_lin seems like a good choice.
# In[36]:
# Hard voting: each estimator casts one vote for a class label.
v_clf1 = VotingClassifier(estimators=[('svm_poly', svm_poly), ('neigh5', neigh5), ('svm_lin', svm_lin)], voting='hard')
v_clf1.fit(X_train, y_train)
print("Hard Voting: " + str(v_clf1.score(X_test, y_test)))
# In[37]:
# Soft voting: averages the predicted class probabilities instead
# (requires probability=True on the SVMs above).
v_clf2 = VotingClassifier(estimators=[('svm_poly', svm_poly), ('neigh5', neigh5), ('svm_lin', svm_lin)], voting='soft')
v_clf2.fit(X_train, y_train)
print("Soft Voting: " + str(v_clf2.score(X_test, y_test)))
# __(b)__ Choose a classifier and use the BaggingClassifier to create an ensemble. The bagging technique is used to split the train set in random subsets and fit a classifer in each set.
# In[38]:
# Use the best classifier so far.
# Bagging fits independent copies of svm_poly on bootstrap samples
# of the training set and aggregates their votes.
bag_5 = BaggingClassifier(svm_poly, n_estimators=5)
bag_10 = BaggingClassifier(svm_poly, n_estimators=10)
bag_5.fit(X_train, y_train)
bag_10.fit(X_train, y_train)
print("Bagging svm_poly using 5 estimators: " + str(bag_5.score(X_test, y_test)))
print("Bagging svm_poly using 10 estimators: " + str(bag_10.score(X_test, y_test)))
# __(c)__ In general, Bagging reduces overfitting (variance) by averaging or voting. However, in our case it did not increase our model's accuracy. The soft voting classifier achieved the best accuracy, since it combined the pros of different classifiers.
# __Step 19:__ Introduction in Neural Networks and Pytorch.
# __(a)__ Loading the Data
#
# - Create a Dataset
# In[39]:
class PostalDataset(Dataset):
    """PyTorch ``Dataset`` wrapper around the US Postal digit data.

    Implements the two methods the ``DataLoader`` machinery relies on:
    ``__len__`` (dataset size, used for batching/shuffling) and
    ``__getitem__`` (random access to one sample/label pair).
    """

    def __init__(self, X, y):
        """Keep references to the samples *X* and their labels *y*."""
        self.data = X
        self.labels = y

    def __len__(self):
        """Number of samples held by the dataset."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return the ``(sample, label)`` pair stored at position *idx*."""
        return self.data[idx], self.labels[idx]
# - Define a train, dev and test Dataset
# Split training set in training and validation set
# In[40]:
# Hold out 30% of the training data as a validation split (fixed seed).
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.3, random_state=1)
# In[41]:
train_set = PostalDataset(X_train, y_train)
val_set = PostalDataset(X_val, y_val)
test_set = PostalDataset(X_test, y_test)
# In[42]:
print("Training set size: " + str(len(train_set)))
print("Validation set size: " + str(len(val_set)))
print("Test set size: " + str(len(test_set)))
# - Define a train and a test DataLoader
# In[43]:
# Batch size 32 everywhere; shuffling the val/test loaders is harmless
# here since only accuracy is computed over the full split.
train_loader = DataLoader(train_set, batch_size=32,
                          shuffle=True, num_workers=8)
val_loader = DataLoader(val_set, batch_size=32,
                        shuffle=True, num_workers=8)
test_loader = DataLoader(test_set, batch_size=32,
                         shuffle=True, num_workers=8)
# __(b)__ Define different fully connected neural network architectures.
#
# - Two layer neural network using ReLU activation function.
# In[44]:
class TwoLayerNet(torch.nn.Module):
    """Fully connected network with a single ReLU hidden layer."""

    def __init__(self, D_in, H, D_out):
        """D_in / H / D_out: input, hidden and output layer widths."""
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.relu = torch.nn.ReLU()
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, data):
        """Cast to float, project, apply ReLU, then map to class scores."""
        return self.linear2(self.relu(self.linear1(data.float())))
# - Three layer neural network using ReLU activation function.
# In[45]:
class ThreeLayerNet(torch.nn.Module):
    """Fully connected network with two ReLU hidden layers."""

    def __init__(self, D_in, H_1, H_2, D_out):
        """D_in / H_1 / H_2 / D_out: input, hidden and output widths."""
        super(ThreeLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H_1)
        self.relu = torch.nn.ReLU()
        self.linear2 = torch.nn.Linear(H_1, H_2)
        self.linear3 = torch.nn.Linear(H_2, D_out)

    def forward(self, data):
        """Two ReLU projections followed by the linear output layer."""
        h = self.relu(self.linear1(data.float()))
        h = self.relu(self.linear2(h))
        return self.linear3(h)
# - Two layer neural network using tanh activation function.
# In[46]:
class TwoLayerNet_tanh(torch.nn.Module):
    """Fully connected network with a single tanh hidden layer."""

    def __init__(self, D_in, H, D_out):
        """D_in / H / D_out: input, hidden and output layer widths."""
        super(TwoLayerNet_tanh, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.tanh = torch.nn.Tanh()
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, data):
        """Cast to float, project, apply tanh, then map to class scores."""
        return self.linear2(self.tanh(self.linear1(data.float())))
# - Three layer neural network using tanh activation function.
# In[47]:
class ThreeLayerNet_tanh(torch.nn.Module):
    """Fully connected network with two tanh hidden layers."""

    def __init__(self, D_in, H_1, H_2, D_out):
        """D_in / H_1 / H_2 / D_out: input, hidden and output widths."""
        super(ThreeLayerNet_tanh, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H_1)
        self.tanh = torch.nn.Tanh()
        self.linear2 = torch.nn.Linear(H_1, H_2)
        self.linear3 = torch.nn.Linear(H_2, D_out)

    def forward(self, data):
        """Two tanh projections followed by the linear output layer."""
        h = self.tanh(self.linear1(data.float()))
        h = self.tanh(self.linear2(h))
        return self.linear3(h)
# __(c)__ Train the NN in our train data and evaluate it on the validation data.
# In[53]:
# D_in is input dimension;
# D_out is output dimension.
D_in, D_out = 256, 10
# Construct our model from interactive user choices.
layers = int(input("Number of hidden layers: (1 or 2)"))
if layers == 1:
    size1 = int(input("Size of hidden layer: "))
    size2 = None
else:
    size1 = int(input("Size of 1st hidden layer: "))
    size2 = int(input("Size of 2nd hidden layer: "))
act = input("Activation function: (relu or tanh)")
if layers == 1:
    if act == "relu":
        model = TwoLayerNet(D_in, size1, D_out)
    else:
        model = TwoLayerNet_tanh(D_in, size1, D_out)
else:
    if act == "relu":
        # BUG FIX: the second hidden layer used size1 instead of size2.
        model = ThreeLayerNet(D_in, size1, size2, D_out)
    else:
        model = ThreeLayerNet_tanh(D_in, size1, size2, D_out)
print(model)
print()
# Define criterion and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
prev_val = 0
ep = 1
while True:
    print("Epoch: " + str(ep))
    # Train one epoch, tracking accuracy on the training batches.
    correct_train = 0
    total_train = 0
    for i, data in enumerate(train_loader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = model(inputs)
        _, predicted = torch.max(outputs.data, 1)
        total_train += labels.size(0)
        correct_train += (predicted == labels).sum().item()
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
    print('Accuracy in train: %f %%' % (
            100 * correct_train / total_train))
    # Evaluate on validation set
    correct_val = 0
    total_val = 0
    with torch.no_grad():
        # BUG FIX: early stopping must watch the validation split;
        # the original iterated test_loader here, leaking test data.
        for data in val_loader:
            images, labels = data
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total_val += labels.size(0)
            correct_val += (predicted == labels).sum().item()
    print('Accuracy in val: %f %%' % (
            100 * correct_val / total_val))
    # Stop when validation accuracy stops increasing (0.001 tolerance).
    curr_val = correct_val / total_val
    if curr_val + 0.001 < prev_val:
        break
    prev_val = curr_val
    ep += 1
print('Finished Training')
# In the training we can change the following hyperparameters:
# - Model
# - Number of layers
# - Size of each layer
# - Activation functions
#
# In the following table, we summarize the results for different combinations of the above parameters:
# | Hidden Layers | Activation Type | Size of each layer | Epochs | Accuracy (train) | Accuracy (val) |
# | --- | --- | --- | --- | --- | --- |
# | 1 | ReLU | 16 | 8 | 95.66 |90.98 |
# | 1 | ReLU | 32 | 5 | 95.87 | 90.63 |
# | 1 | ReLU | 64 | 9 |98.28 | 92.13 |
# | 1 | ReLU | 128 | 7 |97.98 | 92.23 |
# | --- | --- | --- | --- | --- | --- |
# | 2 | ReLU | 32 - 16 | 10 |97.55 | 91.38 |
# | 2 | ReLU | 64 - 32 | 10 |98.54 | 91.03 |
# | 2 | ReLU | 128 - 64 | 7 |98.47 | 92.18 |
# | --- | --- | --- | --- | --- | --- |
# | 1 | tanh | 16 | 9 | 96.26 |91.03 |
# | 1 | tanh | 32 | 5 | 96 | 91.33 |
# | 1 | tanh | 64 | 5 |96.61 | 91.33 |
# | 1 | tanh | 128 | 6 |97.43 | 91.78 |
# | --- | --- | --- | --- | --- | --- |
# | 2 | tanh | 32 - 16 | 9 |98.04 | 92.13 |
# | 2 | tanh | 64 - 32 | 8 |98.49 | 91.93 |
# | 2 | tanh | 128 - 64 | 11 | 98.36 | __92.33__ |
# In[54]:
# Evaluate the best model on test set
# tanh with 2 hidden layers (128 - 64)
correct = 0
total = 0
with torch.no_grad():
    for data in test_loader:
        images, labels = data
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        # BUG FIX: accumulate into the freshly initialised counters; the
        # original kept adding to the stale correct_val/total_val left over
        # from the training loop, skewing the reported accuracy.
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print(correct / total)
# __(c)__ Convert the PyTorch Neural Network implementation to sklearn compatible.
#
# We will convert the TwoLayerNet.
# In[55]:
class NN_sklearn(BaseEstimator, ClassifierMixin):
    """Sklearn-compatible wrapper around the PyTorch TwoLayerNet."""

    def __init__(self, in_dim, out_dim, hidden_dim, batch_size):
        # Network dimensions and mini-batch size used during fit().
        self.D_in = in_dim
        self.D_out = out_dim
        self.H = hidden_dim
        self.batch_size = batch_size
        self.model = TwoLayerNet(self.D_in, self.H, self.D_out)

    def fit(self, X, y):
        """Train until "validation" accuracy stops improving; returns self.

        NOTE(review): the early-stopping loop below iterates the
        module-level ``test_loader`` (the test split) rather than a
        validation split built from (X, y) -- this leaks test data into the
        stopping criterion and makes the class depend on a global; confirm
        and consider passing a validation set explicitly.
        """
        train_set = PostalDataset(X, y)
        train_loader = DataLoader(train_set, batch_size=self.batch_size,
                                  shuffle=True, num_workers=8)
        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(self.model.parameters())
        prev_val = 0
        ep = 1
        while True:
            print("Epoch: " + str(ep))
            correct_train = 0
            total_train = 0
            for i, data in enumerate(train_loader, 0):
                # get the inputs; data is a list of [inputs, labels]
                inputs, labels = data
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward + backward + optimize
                outputs = self.model(inputs)
                _, predicted = torch.max(outputs.data, 1)
                total_train += labels.size(0)
                correct_train += (predicted == labels).sum().item()
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
            print('Accuracy in train: %f %%' % (
                    100 * correct_train / total_train))
            correct_val = 0
            total_val = 0
            with torch.no_grad():
                for data in test_loader:
                    images, labels = data
                    outputs = self.model(images)
                    _, predicted = torch.max(outputs.data, 1)
                    total_val += labels.size(0)
                    correct_val += (predicted == labels).sum().item()
            print('Accuracy in val: %f %%' % (
                    100 * correct_val / total_val))
            # Stop once accuracy drops by more than the 0.001 tolerance.
            curr_val = correct_val / total_val
            if curr_val + 0.001 < prev_val:
                break
            prev_val = curr_val
            ep += 1
        return self

    def score(self, X, y):
        """
        Return accuracy score on the predictions
        for X based on ground truth y
        """
        test_set = PostalDataset(X, y)
        test_loader = DataLoader(test_set, batch_size=1, num_workers=8)
        correct = 0
        total = 0
        predictions = []
        with torch.no_grad():
            for data in test_loader:
                images, labels = data
                outputs = self.model(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        return correct/total
# In[56]:
# Instantiate the sklearn-compatible wrapper (256 inputs, 10 classes,
# hidden width 32, batch size 32) and train it.
NN_sk = NN_sklearn(256, 10, 32, 32)
NN_sk.fit(X_train, y_train)
# __(d)__ Evaluate our NN on the test set.
# In[57]:
print("Sklearn combatible NN " + str(NN_sk.score(X_test, y_test)))
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 2 16:40:26 2020
@author: Leonardo
University of Oxford - Department of Computer Science - AE03
"""
# NEXT FEATURES!
# Retry system. If something fails or is not doing correctly. Stop and retry.
# Verification of existing processes to be closed before start.
# GUI
# Install external components automatically at first execution
import pip
import os
import pyautogui # pip install pyautogui
import subprocess
import time
import re
import glob # Read file names with RegEx
import shutil # Move files easily
import ctypes # Message box and alerts
from selenium import webdriver # pip install selenium
from selenium.webdriver.support.ui import Select # Support for user interface
from selenium.webdriver.support.ui import WebDriverWait # Wait until the page is complete
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from datetime import date
# e-DJE page listing the daily court gazettes (cadernos) of the TJSP court.
tjsp_cadernos = 'https://dje.tjsp.jus.br/cdje/index.do;jsessionid=904E03097EDCF133765EE6CB860DFD6B.cdje2'
# e-SAJ portal login / case-consultation entry point.
esaj_consulta_processos = 'https://esaj.tjsp.jus.br/sajcas/login?service=https%3A%2F%2Fesaj.tjsp.jus.br%2Fesaj%2Fj_spring_cas_security_check'
# NOTE(review): hard-coded 32-bit Acrobat Reader install path -- confirm per machine.
adobePath = r"C:\Program Files (x86)\Adobe\Acrobat Reader DC\Reader\AcroRd32.exe"
files_path = os.path.expanduser('~\Downloads')  # browser download folder
robot_path = os.path.expanduser('~\Desktop')    # robot working folder
full_text = []        # gazette lines accumulated by get_cases_from_txt()
time_control = 5      # base delay (seconds) between UI automation steps
key = "ADD YOUR ID (BRAZIL CPF) OR KEY FOR ESAJ PORTAL HERE"
raisepass = "ADD YOUR PASSWORD HERE"
# One flag per pipeline stage; set to 1 when the stage completes.
execution_control = [0, 0, 0, 0, 0, 0, 0]
def import_or_install(package):
    """Import *package* by name; if it is missing, install it with pip.

    Returns None; after an install the caller must import the package again.
    """
    try:
        __import__(package)
    except ImportError:
        # BUG FIX: `pip.main` was removed from pip's public API (pip >= 10).
        # Invoking pip through the current interpreter is the supported way.
        import sys
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
# 1.Setup environment --> Check if the environment is ok
def setup_environment():
    """Pre-flight check placeholder; currently only marks stage 0 as done."""
    print("Checking system requirements")
    # msgbox_val = ctypes.windll.user32.MessageBoxW(0, "", "Robo Law - version 1.2.5", 0)
    # Flag stage 0 (environment check) as complete.
    execution_control[0] = 1
    return 1
# 2. Download pdf file from justice court web page
def collect_data():
    """Download today's court gazette (caderno 12) PDF with a Chrome session.

    Busy-waits until the PDF appears in the download folder, then quits the
    browser and marks stage 1 as complete.  Always returns 1.
    """
    chrome_driver_path = os.path.dirname(os.path.abspath(__file__)) + '\chromedriver_84'
    browser = webdriver.Chrome(executable_path = chrome_driver_path)
    browser.get(tjsp_cadernos)
    # Wait (up to 3s) for the search form before interacting with the page.
    search_elem = WebDriverWait(browser, 3).until(EC.presence_of_element_located((By.ID, 'consultar')))
    # Select the notebooks to be downloaded. We will download the entire file for performance purposes
    select_cadastro = Select(browser.find_element_by_id('cadernosCad'))
    select_cadastro.select_by_value("12")
    elem_consulta = browser.find_element_by_id('download')
    elem_consulta.click()
    # If the download is not completed, wait, otherwise Go!
    while not os.path.exists(files_path + '\caderno3-Judicial-1ªInstancia-Capital.pdf'):
        time.sleep(1)
    if os.path.isfile(files_path + '\caderno3-Judicial-1ªInstancia-Capital.pdf'):
        browser.quit()
        execution_control[1] = 1
    else:
        print("1x00: Download failed. Please reboot the process")
        browser.quit()
    return 1
# 3. Converts PDF to TXT using adobe reader
def process_pdf_to_txt(file_name, file_type):
    """Convert PDF(s) to .txt by driving Adobe Reader's menus with pyautogui.

    Args:
        file_name: leading-backslash file name without extension (file_type 1)
            or a glob pattern such as '\doc_*' (file_type 0).
        file_type: 1 for the single heavy gazette file, anything else for the
            batch of petition PDFs under <cwd>\data.

    NOTE(review): the keystroke sequences below encode Adobe Reader's
    localized menu accelerators (alt+q / v / a) -- confirm they match the
    installed Reader language before changing any of them.
    """
    # Open adobe reader and use it to save as .txt
    if file_type == 1: # 1 stands for 'one heavy' processing files
        subprocess.Popen("%s %s" % (adobePath, files_path + file_name + ".pdf"))
        time.sleep(10)
        # Menu navigation: open the "save as text" dialog.
        pyautogui.keyDown('alt')
        time.sleep(5)
        pyautogui.press('q')
        time.sleep(5)
        pyautogui.press('v')
        time.sleep(5)
        pyautogui.keyUp('alt')
        time.sleep(5)
        print(files_path + file_name + '.txt')
        pyautogui.write(files_path + file_name + '.txt')
        time.sleep(5)
        pyautogui.press('enter')
        time.sleep(6)
        # Check if the transformation for txt is complete
        current_size = os.stat(files_path + '\caderno3-Judicial-1Instancia-Capital' + '.txt').st_size
        previous_size = -1
        width = int((pyautogui.size()[0]) / 2)
        height = int((pyautogui.size()[1]) / 2)
        pyautogui.moveTo(width,height)
        # Poll once a minute until the output file stops growing.
        while current_size != previous_size:
            previous_size = current_size
            pyautogui.click()
            time.sleep(60)
            current_size = os.stat(files_path + '\caderno3-Judicial-1Instancia-Capital' + '.txt').st_size
        pyautogui.press('enter')
        pyautogui.press('enter')
        time.sleep(5)
        # Close the Reader window (alt+f4), then force-kill any leftover process.
        pyautogui.keyDown('alt')
        pyautogui.press('f4')
        pyautogui.keyUp('alt')
        time.sleep(1)
        # Finish the process after conversion
        os.system("taskkill /f /im " + "AcroRd32.exe")
    else:
        pdf_files_path = []
        for file in glob.glob(os.getcwd() +'\data' + file_name):
            pdf_files_path.append(file)
        print('Amount of pdf files to be converted: ', len(pdf_files_path))
        print(pdf_files_path)
        # Convert each petition PDF one at a time through the same menus.
        for file in pdf_files_path:
            subprocess.Popen("%s" % (adobePath))
            time.sleep(5)
            pyautogui.keyDown('alt')
            time.sleep(5)
            pyautogui.press('q')
            time.sleep(5)
            pyautogui.press('a')
            pyautogui.keyUp('alt')
            time.sleep(5)
            pyautogui.write(file)
            time.sleep(5)
            pyautogui.keyDown('alt')
            pyautogui.press('a')
            pyautogui.keyUp('alt')
            time.sleep(5)
            pyautogui.keyDown('alt')
            time.sleep(5)
            pyautogui.press('q')
            time.sleep(5)
            pyautogui.press('v')
            time.sleep(5)
            pyautogui.keyUp('alt')
            time.sleep(5)
            pyautogui.press('enter')
            time.sleep(12)
            os.system("taskkill /f /im " + "AcroRd32.exe")
    # Flag stage 2 (PDF -> TXT conversion) as complete.
    execution_control[2] = 1
    return 1
# 4. Process files
def get_cases_from_txt(file_name):
    """Parse the converted gazette text and collect the target cases.

    Scans for 'BUSCA E APREENSÃO EM ALIENAÇÃO FIDUCIÁRIA' entries; for each
    hit, records the case number (previous line) plus plaintiff/defendant
    details taken from the following lines.  Also appends every non-empty
    gazette line to the module-level `full_text` list.

    Args:
        file_name (str): leading-backslash file name without extension.

    Returns:
        tuple: (list of case numbers, dict case-number -> details dict
        with 'REQTE'/'REQDO' keys).
    """
    cases = []
    cases_details = []
    time.sleep(5)
    # BUG FIX: the file handle was never closed; use a context manager.
    with open(files_path + file_name + '.txt', mode='r', encoding='utf-8', errors='ignore') as file_reader:
        # Cleaning before Reading strategy!
        for line in file_reader:
            if line.rstrip('\n') != "": # Remove empty indexes from the list
                full_text.append(line.rstrip("\n")) # Remove new lines
    for index, item in enumerate(full_text):
        # A valid entry: the class marker line, preceded by a 26-char case
        # number and followed by a plaintiff line without dots.
        if item.find('CLASSE :BUSCA E APREENSÃO EM ALIENAÇÃO FIDUCIÁRIA') != -1 and len(full_text[index - 1].replace('PROCESSO :',"")) == 26 and (full_text[index + 1].replace('REQTE :',"").replace('REQTE ',"").count('.')) == 0:
            if str(full_text[index + 3]).find('REQTE') == 0:
                req = ""
            else:
                req = full_text[index + 3].replace('REQDO :',"").replace('REQDA :',"").replace('REQDA ',"").replace('REQDO ',"")
            cases.append(full_text[index - 1].replace('PROCESSO :',""))
            cases_details.append({'REQTE': full_text[index + 1].replace('REQTE :',"") ,
                                  'REQDO': req if len(full_text[index + 3]) > 5 else ""
                                  })
    print("Amount of cases to be searched:", len(cases))
    # Flag stage 3 (gazette parsing) as complete.
    execution_control[3] = 1
    return cases, dict(zip(cases, cases_details))
# 5. Check the process on web
# Potential Exception: SessionNotCreatedException:
def check_process(cases):
    """Log into e-SAJ and download the first petition PDF of every case.

    Args:
        cases (list[str]): formatted case numbers from get_cases_from_txt().

    Returns:
        list[tuple]: (case number, petition file id) pairs, built from the
        doc_*.pdf files found in the download folder afterwards.
    """
    cases_file_map = []
    browser = None
    try:
        chrome_driver_path = os.path.dirname(os.path.abspath(__file__)) + '\chromedriver_84'
        browser = webdriver.Chrome(executable_path = chrome_driver_path)
        browser.maximize_window()
        browser.get(esaj_consulta_processos)
        root_window = browser.window_handles[0]
        time.sleep(time_control)
        # Portal login.
        browser.find_element_by_id('usernameForm').send_keys(key)
        time.sleep(time_control)
        browser.find_element_by_id('passwordForm').send_keys(raisepass)
        time.sleep(time_control)
        submit = browser.find_element_by_id('pbEntrar')
        submit.click()
        time.sleep(time_control)
        search_process = browser.find_element_by_xpath('//*[@id="esajConteudoHome"]/table[2]/tbody/tr/td[2]/a')
        search_process.click()
        time.sleep(time_control)
        for case in cases:
            # e-SAJ wants the case number split into two form fields.
            process_number_p1 = case.replace('.','').replace('-','')[0:13]
            process_number_p2 = case.replace('.','').replace('-','')[-4:]
            search_process_1level = browser.find_element_by_xpath('//*[@id="esajConteudoHome"]/table[1]/tbody/tr/td[2]/a')
            search_process_1level.click()
            time.sleep(time_control)
            browser.find_element_by_id('numeroDigitoAnoUnificado').send_keys(process_number_p1)
            time.sleep(time_control)
            browser.find_element_by_id('foroNumeroUnificado').send_keys(process_number_p2)
            time.sleep(time_control)
            submit = browser.find_element_by_id('pbEnviar') # download for full download (website not working properly)
            submit.click()
            time.sleep(time_control)
            access_elem = browser.find_element_by_xpath('//*[@id="linkPasta"]')
            access_elem.click()
            time.sleep(time_control)
            # The case folder opens in a second tab.
            tab_window = browser.window_handles[1]
            browser.switch_to_window(tab_window)
            browser.maximize_window()
            time.sleep(time_control)
            checkbox = browser.find_element_by_xpath('//*[@id="pagina_1_cont_0_anchor"]')
            checkbox.click()
            time.sleep(time_control)
            pyautogui.press('enter')
            time.sleep(time_control)
            width = int((pyautogui.size()[0]) / 2)
            height = int((pyautogui.size()[1]) / 2)
            pyautogui.moveTo(width,height)
            pyautogui.click()
            time.sleep(time_control)
            # Download button: 26 tab stops away in the PDF viewer
            # (collapsed the 26 repeated press calls into one).
            pyautogui.press('tab', presses=26)
            time.sleep(time_control)
            pyautogui.press('enter')
            time.sleep(6)
            # Close the viewer tab and return to the search window.
            pyautogui.keyDown('alt')
            pyautogui.press('f4')
            pyautogui.keyUp('alt')
            browser.switch_to_window(root_window)
            time.sleep(time_control)
            back_search = browser.find_element_by_xpath('/html/body/div/table[2]/tbody/tr/td[2]/table/tbody/tr[3]/td/a[3]')
            back_search.click()
            time.sleep(7)
        # Save to build a common df
        for name in glob.glob(files_path + '\doc_*.pdf'):
            filename = os.path.basename(name)
            cases_file_map.append(filename[-12:-4])
            shutil.move(name , os.getcwd() + '\data')
    except Exception as exc:
        # BUG FIX: the bare `except: pass` hid every failure; keep the
        # best-effort behaviour but surface the cause.
        print('5x00: check_process aborted:', exc)
    # BUG FIX: guard against NameError when webdriver.Chrome() itself failed.
    if browser is not None:
        browser.quit()
    execution_control[4] = 1
    print('Amount cases:', len(cases))
    print('Amount petitions:', len(cases_file_map))
    return list(zip(cases, cases_file_map))
# 6. Convert initial petition to txt and detect informations
def extract_useful_info():
    """Scan the converted petition .txt files for vehicle details.

    Returns:
        tuple: (file ids, modelo, chassi, renavam, placa) where every detail
        list holds [file id, extracted text] pairs.
    """
    files = []
    vehicle_roi = []
    modelo = []
    chassi = []
    placa = []
    renavam = []
    word_bank = ['modelo', 'chassi', 'renavam', 'placa']
    # 1. Get all files to be processed
    for file in glob.glob(os.getcwd() + '\data' + '\doc_*.txt'):
        # BUG FIX: context manager guarantees the handle is closed even on error.
        with open(file, mode='r', encoding='utf-8', errors='ignore') as file_reader:
            file_content = file_reader.readlines()
        files.append(file[-12:-4])
        # 2. Natural Language Processing >> ROI strategy: Search the region of interest for vehicle details
        for statement in file_content:
            for matcher in word_bank:
                if matcher in statement.lower():
                    # Key: Document + Elegible text
                    vehicle_roi.append([file[-12:-4], statement.strip('\n')])
        # Field-specific extraction: strip the keyword/punctuation and keep
        # the remaining text, keyed by the petition file id.
        for statement in file_content:
            if 'modelo' in statement.lower():
                modelo.append([file[-12:-4], statement.lower().strip('\n').replace("modelo","").replace(":","").replace("marca","").replace('/'," ")])
            if 'chassi' in statement.lower():
                chassi.append([file[-12:-4], statement.lower().strip('\n').replace("chassi","").replace(":","")])
            if 'renavam' in statement.lower():
                text_found = statement.lower().strip('\n').replace("renavam","").rstrip().replace(":","")
                # Keep only purely numeric RENAVAM candidates.
                if text_found.isdigit():
                    renavam.append([file[-12:-4], text_found])
            if 'placa' in statement.lower():
                placa.append([file[-12:-4], statement.lower().strip('\n').replace("placa","").replace(":","")])
    #vehicle_information = []
    #vehicle_information = extract_text(files, vehicle_roi)
    execution_control[5] = 1
    return files, modelo, chassi, renavam, placa
# 7. Natural Language Processing
def extract_text(files, textList):
    """Extract vehicle attributes (marca, modelo, chassi, renavam, placa)
    from the ROI lines of each petition file.

    For every entry belonging to a file, the first recognised keyword wins:
    the word right after it is captured and the rest of that entry is
    skipped.  Values are NOT reset between files, so a file missing a field
    inherits the last value seen (original behaviour, preserved).
    """
    fields = {'marca': "", 'modelo': "", 'chassi': "", 'renavam': "", 'placa': ""}
    vehicle_information = []
    for doc in files:
        for entry in textList:
            if entry[0] != doc:
                continue
            words = entry[1].lower().replace('.', "").replace(':', "").split(" ")
            for pos, word in enumerate(words):
                if word in fields:
                    value = words[pos + 1]
                    # RENAVAM must be numeric; other fields take any token.
                    if word != 'renavam' or str(value).isdigit():
                        fields[word] = value
                    break
        vehicle_information.append({'FILE': doc,
                                    'MARCA': fields['marca'],
                                    'MODELO': fields['modelo'],
                                    'CHASSI': fields['chassi'],
                                    'RENAVAM': fields['renavam'],
                                    'PLACA': fields['placa']
                                    })
    return vehicle_information
def get_info(key, cases_files_map):
    """Return the value paired with *key* in a list of (key, value) entries.

    Falls back to a single space (legacy sentinel used by letter_builder)
    when the key is absent.  Fixed: removed the unreachable `break` that
    followed the `return` in the original index-based scan.
    """
    for entry in cases_files_map:
        if key == entry[0]:
            return entry[1]
    return ' '
# 8. Build final letter
def letter_builder(process_info, cases_files_map, models=(), chassi=(), car_id=(), plate=()):
    """Fill the Word letter template for every case and save the result.

    Drives Microsoft Word through pyautogui: opens model.docx, runs
    find-and-replace (ctrl+u) over each placeholder, then saves the letter
    as 'Carta_<case>_<petition file>'.

    Args:
        process_info (dict): case number -> details dict (REQTE/REQDO).
        cases_files_map (list): (case number, petition file id) pairs.
        models, chassi, car_id, plate: (file id, text) pairs from
            extract_useful_info().  BUG FIX: they now default to empty so a
            two-argument call still works -- vehicle fields come out blank
            instead of raising TypeError.
    """
    for key, item in process_info.items():
        fname = get_info(key, cases_files_map)
        idx = 0
        target = ['|PROCESSO|','|BANCO|','|NOME|','|ENDERECO|','|VEICULO|']
        os.startfile(robot_path + '\RoboLaw\model.docx')
        time.sleep(10)
        # Open Word's find-and-replace dialog.
        pyautogui.keyDown('ctrl')
        pyautogui.press('u')
        pyautogui.keyUp('ctrl')
        while idx < len(target):
            time.sleep(3)
            pyautogui.write(target[idx])
            time.sleep(5)
            pyautogui.press('tab')
            time.sleep(5)
            # Type the replacement value for the current placeholder.
            if idx == 0:
                pyautogui.write(key)
            elif idx == 1:
                pyautogui.write(list(item.values())[0])
            elif idx == 2:
                pyautogui.write(list(item.values())[1])
            elif idx == 3:
                pyautogui.write('EM DESENVOLVIMENTO')
            elif idx == 4:
                file_model = get_info(fname, models)
                file_chassi = get_info(fname, chassi)
                file_carid = get_info(fname, car_id)
                file_plate = get_info(fname, plate)
                text = 'Modelo-Marca:' + str(file_model) + ' Chassi:' + str(file_chassi) + ' Renavam:' + str(file_carid) + ' Placa:' + str(file_plate)
                pyautogui.write(text)
            time.sleep(5)
            # Trigger the replace action (alt+i) and confirm.
            pyautogui.keyDown('alt')
            pyautogui.press('i')
            pyautogui.keyUp('alt')
            time.sleep(5)
            pyautogui.press('enter')
            idx += 1
        pyautogui.press('esc')
        time.sleep(5)
        pyautogui.press('f12') # Save as
        time.sleep(5)
        pyautogui.write(robot_path + '\RoboLaw\letters\\'+ 'Carta_'+ key + "_" + fname)
        time.sleep(5)
        pyautogui.press('enter')
        time.sleep(5)
        pyautogui.press('enter')
        time.sleep(5)
        # Close Word via alt+f4.
        pyautogui.keyDown('alt')
        pyautogui.press('f4')
        pyautogui.keyUp('alt')
        time.sleep(5)
    # os.system("taskkill /f /im " + "WINWORD.EXE") # passar para alt + f4
def attempt_controller():
    """Placeholder for the planned retry/attempt-control system."""
    print('Under Construction')
def extract_address(cases_info):
    """Placeholder for address extraction from the case details (NLP)."""
    print('Under Construction')
def clean_environment():
    """Archive today's artefacts and delete temporaries.

    Moves the gazette .txt and the petition PDFs into a dated history
    folder; removes the gazette .pdf and the petition .txt conversions.
    (Removed the unused `filename` locals; `== False` -> `not`.)
    """
    folder_name = str(date.today())
    # Check if the folder already exists. Otherwise create a current date folder
    if not os.path.exists(robot_path + "\RoboLaw\hist\\" + folder_name):
        os.mkdir(robot_path + "\RoboLaw\hist\\" + folder_name)
    # Move Caderno .txt and remove .pdf
    for name in glob.glob(files_path + '\caderno3-Judicial*.txt'):
        if os.path.exists(name):
            shutil.move(name , robot_path + "\RoboLaw\hist\\" + folder_name)
    for name in glob.glob(files_path + '\caderno3-Judicial*.pdf'):
        if os.path.exists(name):
            os.remove(name)
    # Move .pdf files for hist
    for name in glob.glob(robot_path + '\RoboLaw\data' + '\doc_*.pdf'):
        if os.path.exists(name):
            shutil.move(name , robot_path + "\RoboLaw\hist\\" + folder_name)
    # Delete .txt files
    for name in glob.glob(robot_path + '\RoboLaw\data' + '\doc_*.txt'):
        if os.path.exists(name):
            os.remove(name)
if __name__ == "__main__":
print("Loading...")
# ctypes.windll.user32.MessageBoxW(0, "Hello! =) I am a simple box", "Robo", 1)
# setup_environment()
collect_data()
process_pdf_to_txt(file_name= '\caderno3-Judicial-1ªInstancia-Capital', file_type= 1)
cases_list, cases_info = get_cases_from_txt(file_name= '\caderno3-Judicial-1Instancia-Capital')
cases_files_map = check_process(cases_list)
process_pdf_to_txt(file_name= '\doc_*', file_type= 0)
processed_files, models, chassi, car_id, plate = extract_useful_info() # NLP technique
address_details = extract_address(cases_info) # NLP technique
print('Cases Info length: ', len(cases_info)) # Let's check if the lenght is really the same
print('Mapping Cases x File length:', len(cases_files_map)) # Let's check if the lenght is really the same
letter_builder(cases_info, cases_files_map)
clean_environment()
# THIS SOFTWARE IS AT ALPHA VERSION -- BUG FIXES AND TESTING ARE NEEDED EXTENSIVELY
# cases_list >> List of all process individually
# cases_info >> Dictionary with process + individuals details (REQTE + REQDO)
# cases_files_map >> Dictionary with process + petition index files. Maps files to processes as index
# ON CODE VERSION CONTROL:
# v.1.2.1 >> Minor improvement: return map cases as List and not dictionary
# v.1.2.5 >> Transform to PDF Critical Update: Check each minute (60s) if the big file transformation is completed
# v.1.2.5 >> Add Vehicles information to be printed
# v.1.2.5 >> Improvements on software stability for 5s
# v.1.2.5 >> Added the petition file name at the end of the letter
# v.1.2.5 >> Added new get_info() function. This function runs all over the dictionary looking for specific information given the petition file name |
from django.contrib import admin
from .models import Product
# Register your models here.
class MyProduct(Product):
    """Proxy of Product (no new DB table); lets us register a second,
    filtered admin for the same underlying model."""
    class Meta:
        proxy = True
class MyProductAdmin(admin.ModelAdmin):
    """Admin that only shows products currently flagged as selected."""

    def get_queryset(self, request):
        # Restrict the change list to selected products only.
        return self.model.objects.filter(product_selected = True)

    def unselect_product(MyProductAdmin, request, queryset):
        # Bulk admin action: clear the selected flag on every chosen row.
        # Saved one-by-one (rather than queryset.update) so save() hooks fire.
        for product in queryset:
            product.product_selected = False
            product.save()
    unselect_product.short_description = "Mark products will be unselected"

    actions = [unselect_product]
# Base model uses the default admin; the proxy uses the filtered admin above.
admin.site.register(Product)
admin.site.register(MyProduct, MyProductAdmin)
import sys
import os
import operator
from tree3 import *
from pymongo import MongoClient
from werkzeug import secure_filename
from datetime import datetime
import thread,time
import os
# Connection to the local MongoDB instance; papers live in the "ir" database.
client = MongoClient('mongodb://localhost:27017/')
db = client.ir
# Directory holding the *.scores files produced by the classification step.
path=os.getcwd()+"/JVcode/Scripts/ForClassification"
#path=os.getcwd()
def updateMongo(node,children):
    """Store a node's children in MongoDB (Python 2 code).

    `node` and each entry of `children` are *.scores file names; both are
    mapped back to the corresponding .pdf filename before the lookup.
    Each child is replaced by [_id, domain] from the papers collection,
    then written into the node's `continuation` field.
    Pass children == "null" to record a leaf (no update of child entries).
    """
    #domain=[]
    print node
    if children != "null":
        for i in range(0,len(children)):
            # Strip the ".tab.scores" suffix (10 chars) and append "pdf".
            name1=children[i][:-10]+"pdf"
            print name1
            res=db.papers.find_one({'filename':name1})
            temp=[]
            temp.append(res['_id'])
            temp.append(res['domain'])
            children[i]=temp
            #domain.append(res['domain'])
    node=node[:-10]+"pdf"
    print "for ----",node," children ----",children
    res=db.papers.update_one({'filename':node},{'$set':{'continuation':children}})
    print res
def docReturn(name):
    """Return documents related to `name`, ordered by descending score.

    Reads <path>/<name>.scores, where each whitespace-separated line is
    "<doc1> <doc2> <score>", and keeps every doc2 whose score is at least
    30% of the maximum score seen. Returns [] for an empty score file.
    """
    doc2 = []
    cont = []
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(path + "/" + name + ".scores", "r") as fd:
        for raw in fd:
            line = raw.split()
            # BUGFIX: the original tested `name != doc2`, comparing a string
            # against a list -- always True, so self-comparison rows slipped
            # through. The intent is to skip the row whose second column is
            # the document itself.
            if line[1] != name:
                doc2.append(line[1])
                cont.append(float(line[2]))
    if not cont:
        return []
    # Sort doc names by descending score (replaces the manual bubble sort).
    order = sorted(range(len(cont)), key=lambda k: cont[k], reverse=True)
    threshold = 0.3 * max(cont)
    return [doc2[k] for k in order if cont[k] >= threshold]
"""def addtotree(tree,node,outdict):
toAdd = []
print "===============",node.v,"==========="
for i in outdict[node.v]:
stri = i+".scores"
#print stri,"\t",tree.find(stri)
if tree.find(stri) == None:
toAdd.append(stri)
print ":::::::::::::::::::::::::::::::::::::::::::::"
if len(toAdd) != 0:
tree.add(node.v,toAdd)
tree.printTree()
if (node.l != None):
addtotree(tree,node.l,outdict)
if (node.r != None):
addtotree(tree,node.r,outdict)
return tree"""
def addtotree2(tree,node,outdict):
    """Breadth-first expansion of the citation tree (Python 2 code).

    For each node on the current level, add every related doc from
    `outdict` that is not already in the tree, then record the node's
    direct children (or "null" for a leaf) in MongoDB via updateMongo.
    Returns the (mutated) tree.
    """
    thislevel = [node]
    while thislevel:
        nextlevel = []
        for n in thislevel:
            print "===============",n.v,"==========="
            toAdd = []
            print outdict[n.v]
            for i in outdict[n.v]:
                stri = i+".scores"
                print stri,"\t",tree.find(stri)
                # Only insert docs not already placed somewhere in the tree.
                if tree.find(stri) == None:
                    toAdd.append(stri)
            print "To ADD::\n",toAdd
            if len(toAdd) != 0:
                tree.add(n.v,toAdd)
            print "check -----Parent ",n.v
            #print "\n\ncheck---------",n.v,n.l.v,n.r.v
            # Collect direct children for the Mongo `continuation` record
            # and queue them for the next BFS level.
            temp1=[]
            if n.l:
                nextlevel.append(n.l)
                print "check -----Left ",n.l.v
                temp1.append(n.l.v)
            if n.r:
                nextlevel.append(n.r)
                print "check -----Right ",n.v
                temp1.append(n.r.v)
            if len(temp1)>0:
                updateMongo(n.v,temp1)
            else:
                updateMongo(n.v,"null")
        print "NEXTLEVEL\n",nextlevel
        print ":::::::::::::::::::::::::::::::::::::::::::::"
        thislevel = nextlevel
    return tree
# --- Driver: build the continuation tree from all *.tab.scores files ---
path=os.getcwd()+"/JVcode/Scripts/ForClassification"
f=[]
# Collect every scores file in the classification directory.
for file1 in os.listdir(path):
    if file1.endswith(".tab.scores"):
        f.append(file1)
dict1=dict()
path1=os.getcwd()+"/JVcode/Scripts/ForClassification/"
# dict1: scores file -> sum of its positive similarity scores.
for i in f:
    fd=open(path1+i,"r")
    dict1[i] = 0.0
    for j in fd:
        line = j.split()
        if float(line[2]) > 0.0:
            dict1[i] = dict1[i] + float(line[2])
# outdict: scores file -> related docs (self excluded, only docs that
# themselves have a scores file once the key already exists).
outdict = dict()
for line in f:
    ret = docReturn(line[:-7])
    for j in ret:
        if j != line[:-7]:
            if line in outdict:
                if j+".scores" in f:
                    outdict[line].append(j)
            else:
                outdict[line] = [j]
"""
for i,j in outdict.items():
    print i,"\t",j
"""
# The base paper (tree root) is the one with the highest total positive score.
basepaper = max(dict1.iteritems(), key=operator.itemgetter(1))[0]
print "BASE PAPER :: ",basepaper
tree = Tree()
tree.addroot(basepaper)
node = tree.root
print "###########################################"
tree = addtotree2(tree,node,outdict)
print "###########################################"
print "###########################################"
print "###########################################"
print "\t\tFINAL TREE"
print "###########################################"
print "###########################################"
print "###########################################"
print node.v
# Persist a pretty-printed version of the final tree.
fdout = open(path1+"tree.txt","w")
tree.prettyprint(node,"|__",fdout)
fdout.close()
|
#!usr/bin/python
# regexlib.py
#
# Copyright 2009 ahmed youssef <xmonader@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import httplib
from myhappymapper import get_root_string, find_all, find_one
HOST="regexlib.com"
URL="http://regexlib.com/webservices.asmx"
#check: http://www.lyonsreg.com/products/python.asp
def soap_post(action, xml):
    """POST a SOAP envelope to the regexlib service and return the raw
    response body (Python 2 httplib)."""
    conn = httplib.HTTPConnection(HOST, 80)
    request_headers = {
        'Host': HOST,
        'Content-Type': 'text/xml; charset=utf-8',
        'Content-Length': len(xml),
        'SOAPAction': '"%s"' % action,
    }
    conn.request("POST", URL, xml, request_headers)
    response = conn.getresponse()
    return response.read()
def create_listregexp(keyword, regex_substring, min_rating, howmanyrows):
    """Build the listRegExp SOAP 1.2 envelope and POST it to the service."""
    template = """<?xml version="1.0" encoding="utf-8"?>
<soap12:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap12="http://www.w3.org/2003/05/soap-envelope">
<soap12:Body>
<listRegExp xmlns="http://regexlib.com/webservices.asmx">
<keyword>%s</keyword>
<regexp_substring>%s</regexp_substring>
<min_rating>%d</min_rating>
<howmanyrows>%d</howmanyrows>
</listRegExp>
</soap12:Body>
</soap12:Envelope>"""
    body = template % (keyword, regex_substring, min_rating, howmanyrows)
    return soap_post("http://regexlib.com/webservices.asmx/listRegExp", body)
def create_getregexpdetails(id_):
    """Build the getRegExpDetails SOAP 1.2 envelope for regexp `id_` and POST it."""
    template = """<?xml version="1.0" encoding="utf-8"?>
<soap12:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap12="http://www.w3.org/2003/05/soap-envelope">
<soap12:Body>
<getRegExpDetails xmlns="http://regexlib.com/webservices.asmx">
<regexpId>%d</regexpId>
</getRegExpDetails>
</soap12:Body>
</soap12:Envelope>"""
    body = template % id_
    return soap_post("http://regexlib.com/webservices.asmx/getRegExpDetails", body)
def create_listallasxml(maxrows):
    """Build the ListAllAsXml SOAP 1.1 envelope (at most `maxrows` rows) and POST it."""
    template = """<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body>
<ListAllAsXml xmlns="http://regexlib.com/webservices.asmx">
<maxrows>%d</maxrows>
</ListAllAsXml>
</soap:Body>
</soap:Envelope>"""
    body = template % maxrows
    return soap_post("http://regexlib.com/webservices.asmx/ListAllAsXml", body)
def get_details_of_regexp(id_):
    """Fetch and return the getRegExpDetailsResult element for regexp `id_`
    (Python 2 code; prints the raw and prettified response for debugging)."""
    xml=create_getregexpdetails(id_)
    print xml
    print "*"*20
    root=get_root_string(xml)
    print root.prettify()
    regexpdetails=find_one("getRegExpDetailsResult", root)
    return regexpdetails
def get_expressions_of(keyword, regex_substring, min_rating, howmanyrows):
    """Query listRegExp and return at most `howmanyrows` Expressions elements."""
    response = create_listregexp(keyword, regex_substring, min_rating, howmanyrows)
    root = get_root_string(response)
    matches = list(find_all("Expressions", root))
    # Slicing is a no-op when the service returned exactly howmanyrows.
    return matches[:howmanyrows]
if __name__=="__main__":
expslist=get_expressions_of("Email", "", 5, 5)
for exps in expslist:
print "*"*20
print exps.Pattern
|
# Order of People Heights
# https://www.interviewbit.com/problems/order-of-people-heights/
#
# You are given the following :
#
# A positive number N
# Heights : A list of heights of N persons standing in a queue
# Infronts : A list of numbers corresponding to each person (P) that gives the number of
# persons who are taller than P and standing in front of P
# You need to return list of actual order of persons’s height
#
# Consider that heights will be unique
#
# Example
#
# Input :
# Heights: 5 3 2 6 1 4
# InFronts: 0 1 2 0 3 2
# Output :
# actual order is: 5 3 2 1 6 4
# So, you can see that for the person with height 5, there is no one taller than him who is in
# front of him, and hence Infronts has 0 for him.
#
# For person with height 3, there is 1 person ( Height : 5 ) in front of him who is taller than him.
#
# You can do similar inference for other people in the list.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class Solution:
    """Order of People Heights (InterviewBit)."""

    # @param A : list of integers (unique heights)
    # @param B : list of integers (count of taller people in front of each person)
    # @return a list of integers: heights in actual queue order
    def order(self, A, B):
        """Reconstruct the queue by placing people tallest-first.

        When person i is inserted, everyone already in `result` is taller,
        so B[i] is exactly the number of people who must stand before them:
        inserting at index B[i] yields the final arrangement.

        O(n^2) overall due to list insertion. Returns [] for empty input
        (the original raised IndexError on A == [] because it special-cased
        the first element with sorted_indices[0]).
        """
        result = []
        # Heights are unique, so sorting the (height, in_front) pairs in
        # reverse orders strictly by descending height.
        for height, in_front in sorted(zip(A, B), reverse=True):
            result.insert(in_front, height)
        return result
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # |
#!/user/bin/python2
# -*- coding: utf-8 -*-
import random
import math
from ml_base import ml_base
from evaluator import evaluator
class logistic_regressor(ml_base):
def __init__(self, dataset, test_rate, train_round=300,
normalization=None, batch_size=32, learning_rate=0.005):
ml_base.__init__(self, dataset, test_rate)
self.train_round = train_round
self.normalization = normalization
self.learning_rate = learning_rate
self.batch_size = batch_size
self.feature_num = len(self.train_data[0])
self.w = [random.random() for i in range(self.feature_num)]
def get_loss(self):
loss = 0
for i in xrange(len(self.train_data)):
z = self.sigmoid(
sum(map(lambda a, b: a * b, self.w, self.train_data[i])))
loss += self.train_label[i] * math.log(z) + (
1 - self.train_label[i]) * math.log(1 - z + 0.00000001)
return loss / len(self.train_data)
def train_gd(self):
for i in xrange(self.train_round):
delta = [0 for ii in xrange(self.feature_num)]
for j in xrange(len(self.train_data)):
for k in xrange(self.feature_num):
z = self.sigmoid(
sum(map(lambda x, y: x * y,
self.w, self.train_data[j])))
delta[k] += (
z - self.train_label[j]) * self.train_data[j][k]
for k in xrange(self.feature_num):
self.w[k] -= delta[k] * self.learning_rate
if i % 100 == 0:
print 'Round %d, loss:%.5f' % (i, self.get_loss())
def train_sgd(self):
for i in xrange(self.train_round):
data = self.train_data
random.shuffle(data)
delta = [0 for ii in xrange(self.feature_num)]
for j in xrange(len(data)):
for k in xrange(self.feature_num):
z = self.sigmoid(
sum(map(lambda x, y: x * y, self.w, data[j])))
delta[k] += (z - self.train_label[j]) * data[j][k]
if j % self.batch_size == 0 and j != 0:
for k in xrange(self.feature_num):
self.w[k] -= delta[k] * self.learning_rate
delta = [0 for ii in xrange(self.feature_num)]
if i % 100 == 0:
print 'Round %d, loss:%.5f' % (i, self.get_loss())
def get_pred(self):
pred = []
for i in xrange(len(self.test_data)):
temp = 0
for j in xrange(self.feature_num):
temp += self.w[j] * self.test_data[i][j]
pred.append(self.sigmoid(temp))
return pred
if __name__ == '__main__':
    # Train on the 'car' dataset with a 20% test split, then plot the ROC curve.
    ml = logistic_regressor('car', 0.2)
    ml.train_gd()
    pred = ml.get_pred()
    lr_eval = evaluator(ml.test_label, pred, 'logistic regression')
    lr_eval.plot_roc()
|
import time
import unittest
from robot import COAST
from robot.robot import Robot
from tests.mock_robotd import MockRobotDFactoryMixin
class MotorBoardTest(MockRobotDFactoryMixin, unittest.TestCase):
    """Integration tests for motor boards against a mocked robotd.

    The sleeps throughout give the mock daemon time to enumerate boards;
    shortening them makes the tests flaky.
    """
    def setUp(self):
        # Fresh mock robotd plus a power board so Robot() can initialise.
        mock = self.create_mock_robotd()
        mock.new_powerboard()
        time.sleep(0.2)
        self.mock = mock
        self.robot = Robot(robotd_path=mock.root_dir, wait_for_start_button=False)

    def test_insert_motorboards(self):
        """Boards are reachable both by position index and by serial."""
        self.mock.new_motorboard('ABC')
        self.mock.new_motorboard('DEF')
        # Give it a tiny bit to init the boards
        time.sleep(0.4)
        boards = self.robot.motor_boards
        # Check all the motor boards are initialised and can be indexed
        self.assertTrue(0 in boards)
        self.assertTrue(1 in boards)
        self.assertTrue('ABC' in boards)
        self.assertTrue('DEF' in boards)

    def test_remove_motorboard_recovery(self):
        """Writing to a removed board raises; re-adding it restores access."""
        mock_motor = self.mock.new_motorboard('ABC')
        # Give it a tiny bit to init the boards
        time.sleep(0.4)
        boards = self.robot.motor_boards
        # Get the board
        board = boards[0]
        self.mock.remove_board(mock_motor)
        with self.assertRaises(ConnectionError):
            board.m0 = 1
        # Re-add it
        self.mock.new_motorboard('ABC')
        time.sleep(0.2)
        board.m0 = 1

    def test_two_clients(self):
        """ Checks you can interface a motor board multiple times"""
        # TODO make this test generic to the board, so it runs on all boards.
        self.mock.new_motorboard('ABC')
        # Set up robot 2!
        robot2 = Robot(robotd_path=self.mock.root_dir, wait_for_start_button=False)
        # Give it a tiny bit to init the boards
        time.sleep(0.2)
        self.robot.motor_boards[0].m0 = 1
        self.robot.motor_boards[0].m0 = -1
        robot2.motor_boards[0].m0 = 1
        robot2.motor_boards[0].m0 = -1

    def test_multiple_indexes(self):
        """ Checks you can index motor boards plenty of times"""
        self.mock.new_motorboard('ABC')
        # Give it a tiny bit to init the boards
        time.sleep(0.2)
        # Check all the motor boards are initialised and can be indexed
        for i in range(10):
            self.assertTrue(0 in self.robot.motor_boards)

    def test_set_edge_conditions(self):
        """Power values at and beyond the valid [-1, 1] range, plus COAST/BRAKE."""
        board = self.mock.new_motorboard()
        time.sleep(0.2)
        for motor in ['m0', 'm1']:
            self._try_power(motor, board, 1.0)
            self._try_power(motor, board, 1)
            self._try_power(motor, board, 0.002)
            self._try_power(motor, board, -1)
            # Invalid error
            with self.assertRaises(ValueError):
                self._try_power(motor, board, -1.01)
            # Brake and coast
            self._try_power(motor, board, COAST)
            # 0 should be BRAKE
            self._try_power_expect(motor, board, 0, 'brake')

    def _try_power(self, motor, board, value):
        # Most values should be echoed back unchanged.
        self._try_power_expect(motor, board, value, value)

    def _try_power_expect(self, motor, board, value, expect):
        """Set `motor` to `value`; assert the board received `expect` and the
        value reads back as set."""
        if motor == 'm0':
            self.robot.motor_boards[0].m0 = value
        elif motor == 'm1':
            self.robot.motor_boards[0].m1 = value
        else:
            raise ValueError()
        got_value = board.message_queue.get()
        # Test the motor board got what it expected
        self.assertEqual(got_value, {motor: expect})
        # Test the value can be read
        if motor == 'm0':
            self.assertEqual(self.robot.motor_boards[0].m0, value)
        elif motor == 'm1':
            self.assertEqual(self.robot.motor_boards[0].m1, value)
        else:
            raise ValueError()
|
from .lut_tools import (
create_dolby_vision_config,
read_cal_file,
read_cube_file,
unity_lut_1d,
unity_lut_3d,
write_dolby_vision_config,
)
from .webos_client import PyLGTVCmdException, PyLGTVPairException, WebOsClient
# Public package API: LUT helpers plus the WebOS client and its exceptions.
__all__ = [
    "create_dolby_vision_config",
    "read_cal_file",
    "read_cube_file",
    "unity_lut_1d",
    "unity_lut_3d",
    "write_dolby_vision_config",
    "PyLGTVCmdException",
    "PyLGTVPairException",
    "WebOsClient",
]
|
from bs4 import BeautifulSoup
import requests
import os
def visit_page(url):
    """Fetch `url` with a desktop-Chrome User-Agent and return the parsed soup."""
    browser_headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36'
    }
    response = requests.get(url, headers = browser_headers)
    # Force UTF-8 so the Chinese text decodes correctly.
    response.encoding = 'utf-8'
    return BeautifulSoup(response.text, 'lxml')
def get_article_urls(page):
    """Return a list of {'title', 'url'} dicts for every article link on
    a parsed listing page, with urls made absolute."""
    anchors = page.select('body > div.p_list > div.wrapper1200 > div > section.main.fl > div.con > div.bd > ul > li > p > a')
    return [
        {
            'title': anchor.get_text(),
            'url': 'http://laotiese.org' + anchor.get('href'),
        }
        for anchor in anchors
    ]
def get_article(articles_urls):
    """Download each article and save its text to ./articles/<title>.txt.

    articles_urls: list of {'title', 'url'} dicts as produced by
    get_article_urls. Prints download progress for each article.
    """
    # Make sure the output directory exists before writing into it.
    os.makedirs('./articles', exist_ok=True)
    for index, article in enumerate(articles_urls):
        article_page = visit_page(article['url'])
        title = article['title']
        content = article_page.select('#content_news')[0].get_text()
        # BUGFIX: explicit encoding -- the platform default may not be able to
        # encode the Chinese content (UnicodeEncodeError on e.g. Windows).
        with open('./articles/' + title + '.txt', 'w+', encoding='utf-8') as f:
            print('正在下载第' + str(index + 1) + '/' + str(len(articles_urls)) + '篇文章...')
            f.write(content)
# Crawl the index page, collect every article link, then download them all.
PAGE = visit_page('http://laotiese.org/txtsex/index.html')
ARTICLE_URLS = get_article_urls(PAGE)
get_article(ARTICLE_URLS)
|
# Copyright 2021 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for the error.py module."""
import inspect
import pickle
import unittest
import error
class PickleTests(unittest.TestCase):
    """Make sure all our custom exceptions can be pickled."""

    def getExceptions(self):
        """Yield every Exception subclass exported by the error module."""
        for attr_name in dir(error):
            candidate = getattr(error, attr_name)
            if isinstance(candidate, type) and issubclass(candidate, Exception):
                yield candidate

    def testExceptionLookup(self):
        """Make sure our introspection logic works."""
        found = list(self.getExceptions())
        self.assertIn(error.HookError, found)
        # Don't assert the exact number to avoid being a change-detector test.
        self.assertGreater(len(found), 10)

    def testPickle(self):
        """Try to pickle all the exceptions."""
        for exc_cls in self.getExceptions():
            # Use the constructor's own parameter names as dummy arguments.
            ctor_args = inspect.getfullargspec(exc_cls.__init__).args[1:]
            original = exc_cls(*ctor_args)
            payload = pickle.dumps(original)
            try:
                restored = pickle.loads(payload)
            except Exception as e:  # pylint: disable=broad-except
                self.fail('Class %s is unable to be pickled: %s\n'
                          'Incomplete super().__init__(...) call?' % (exc_cls, e))
            self.assertIsInstance(restored, exc_cls)
            self.assertEqual(str(original), str(restored))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.