content stringlengths 5 1.05M |
|---|
import torch
from torch.nn import Module
class SDFLoss(Module):
    """Unit-ball signed-distance penalty.

    For every point the squared L2 norm ``||x||_2^2`` is computed along the
    last axis. Points with squared norm <= 1 (inside the unit ball) contribute
    zero; points outside contribute ``||x||_2^2 - 1``. The loss is the mean
    contribution over all points.
    """

    def __init__(self):
        super().__init__()
        # Constants registered as buffers so they follow the module's device/dtype.
        self.register_buffer('one', torch.tensor(1.0))
        self.register_buffer('zero', torch.tensor(0.0))

    def forward(self, points):
        sq_norms = points.pow(2).sum(-1)
        # Inside points have a negative excess; flatten those to zero.
        penalties = torch.max(sq_norms - self.one, self.zero)
        return penalties.mean()
|
# -*- coding: utf-8 -*-
# Learn more: https://github.com/kennethreitz/setup.py
import os, sys
from setuptools import setup, find_packages
def read_requirements():
    """Parse requirements from requirements.txt, one requirement per line."""
    reqs_path = os.path.join("", "requirements.txt")
    with open(reqs_path, "r") as req_file:
        return [entry.rstrip() for entry in req_file]
# Read long-description and license texts from the repository root.
with open('README.md') as f:
    readme = f.read()
with open('LICENSE') as f:
    # NOTE(review): `license` shadows the builtin, and setup()'s `license`
    # field is meant to hold a short identifier (e.g. "MIT"), not the full
    # LICENSE file text — confirm and consider license="MIT" (or similar).
    license = f.read()
setup(
    name='dnorm_j',
    version='0.1.0',
    description='Japanese disease normalizer',
    long_description=readme,
    author='Shogo Ujiie',
    author_email='ujiie@is.naist.jp',
    url='https://github.com/sociocom/DNorm-J',
    license=license,
    install_requires=read_requirements(),
    packages=find_packages(exclude=('tests', 'docs'))
)
|
import dash_core_components as dcc
import dash_html_components as html
import dash_table
from dash import Dash
from dash.dependencies import Input, Output
import stock_pattern_analyzer as spa
from dash_app_functions import get_search_window_sizes, get_symbols, search_most_recent
# Dash application instance; the viewport meta tag makes the layout responsive.
app = Dash(__name__, meta_tags=[{"name": "viewport", "content": "width=device-width"}])
app.title = "Stock Patterns"
# Exposed for WSGI servers (e.g. gunicorn) to serve the app.
server = app.server
##### Header #####
# Three-column header row: emoji, title/subtitle, and an author link button.
header_div = html.Div([html.Div([html.H3("📈")], className="one-third column"),
                       html.Div([html.Div([html.H3("Stock Patterns", style={"margin-bottom": "0px"}),
                                           html.H5("Find historical patterns and use for forecasting",
                                                   style={"margin-top": "0px"})])],
                                className="one-half column", id="title"),
                       html.Div([html.A(html.Button("Gabor Vecsei"), href="https://www.gaborvecsei.com/")],
                                className="one-third column",
                                id="learn-more-button")],
                      id="header", className="row flex-display", style={"margin-bottom": "25px"})
##### Explanation #####
# Short user-facing description shown under the header.
# FIX: corrected user-visible typos ("This tools finds" -> "This tool finds",
# "patters" -> "patterns").
explanation_div = html.Div([dcc.Markdown("""Select a stock symbol and a time-frame. This tool finds similar patterns in
historical data.
The most similar patterns are visualized with an extended *time-frame/'future data'*, which can be an
indication of future price movement for the selected (anchor) stock.""")])
##### Settings container #####
# Stock symbol selector (single choice); defaults to AAPL when available.
symbol_dropdown_id = "id-symbol-dropdown"
available_symbols = get_symbols()
default_symbol = "AAPL" if "AAPL" in available_symbols else available_symbols[0]
symbol_dropdown = dcc.Dropdown(id=symbol_dropdown_id,
                               options=[{"label": x, "value": x} for x in available_symbols],
                               multi=False,
                               value=default_symbol,
                               className="dcc_control")
# Length (in days) of the pattern window to search for.
window_size_dropdown_id = "id-window-size-dropdown"
window_sizes = get_search_window_sizes()
window_size_dropdown = dcc.Dropdown(id=window_size_dropdown_id,
                                    options=[{"label": f"{x} days", "value": x} for x in window_sizes],
                                    multi=False,
                                    value=window_sizes[2],
                                    className="dcc_control")
# Number of days beyond the matched window to display ("future" segment).
future_size_input_id = "id-future-size-input"
MAX_FUTURE_WINDOW_SIZE = 10
future_size_input = dcc.Input(id=future_size_input_id, type="number", min=0, max=MAX_FUTURE_WINDOW_SIZE, value=5,
                              className="dcc_control")
# Number of most similar patterns to retrieve.
top_k_input_id = "id-top-k-input"
MAX_TOP_K_VALUE = 10
top_k_input = dcc.Input(id=top_k_input_id, type="number", min=0, max=MAX_TOP_K_VALUE, value=5, className="dcc_control")
# Toggle: shift matched traces to the anchor's last close for easier comparison.
offset_checkbox_id = "id-offset-checkbox"
offset_checkbox = dcc.Checklist(id=offset_checkbox_id, options=[{"label": "Use Offset", "value": "offset"}],
                                value=["offset"], className="dcc_control")
# Left-hand settings panel assembling all of the controls above.
settings_div = html.Div([html.P("Symbol (anchor)", className="control_label"),
                         symbol_dropdown,
                         html.P("Search window size", className="control_label"),
                         window_size_dropdown,
                         html.P(f"Future window size (max. {MAX_FUTURE_WINDOW_SIZE})", className="control_label"),
                         future_size_input,
                         html.P(f"Patterns to match (max. {MAX_TOP_K_VALUE})", className="control_label"),
                         top_k_input,
                         html.P("Offset the matched patterns for easy comparison (to the anchors last market close)",
                                className="control_label"),
                         offset_checkbox],
                        className="pretty_container three columns",
                        id="id-settings-div")
##### Stats & Graph #####
# Container holding the stats cards row and the main price graph.
graph_id = "id-graph"
stats_and_graph_div = html.Div([html.Div(id="id-stats-container", className="row container-display"),
                                html.Div([dcc.Graph(id=graph_id)], id="id-graph-div", className="pretty_container")],
                               id="id-graph-container", className="nine columns")
##### Matched Stocks List #####
# Paged table listing the most similar historical patterns; column order here
# must match the row ordering produced in the update callback.
matched_table_id = "id-matched-list"
table_columns = ["Index",
                 "Match distance",
                 "Symbol",
                 "Pattern Start Date",
                 "Pattern End Date",
                 "Pattern Start Close Value ($)",
                 "Pattern End Close Value ($)",
                 "Pattern Future Close Value ($)"]
table = dash_table.DataTable(id=matched_table_id, columns=[{"id": c, "name": c} for c in table_columns], page_size=5)
matched_div = html.Div([html.Div([html.H6("Matched (most similar) patterns"), table],
                                 className="pretty_container")],
                       id="id-matched-list-container",
                       className="eleven columns")
##### Reference Links #####
css_link = html.A("[1] Style of the page (css)",
                  href="https://github.com/plotly/dash-sample-apps/tree/master/apps/dash-oil-and-gas")
yahoo_data_link = html.A("[2] Yahoo data", href="https://finance.yahoo.com")
gabor_github_link = html.A("[3] Gabor Vecsei GitHub", href="https://github.com/gaborvecsei")
reference_links_div = html.Div([html.Div([html.H6("References"),
                                          css_link,
                                          html.Br(),
                                          yahoo_data_link,
                                          html.Br(),
                                          gabor_github_link],
                                         className="pretty_container")],
                               className="four columns")
##### Layout #####
# Top-level page layout: header, explanation, settings+graph row,
# matched-patterns row, then reference links.
app.layout = html.Div([header_div,
                       explanation_div,
                       html.Div([settings_div,
                                 stats_and_graph_div],
                                className="row flex-display"),
                       html.Div([matched_div], className="row flex-display"),
                       reference_links_div],
                      id="mainContainer",
                      style={"display": "flex", "flex-direction": "column"})
##### Callbacks #####
@app.callback([Output(graph_id, "figure"),
               Output(matched_table_id, "data")],
              [Input(symbol_dropdown_id, "value"),
               Input(window_size_dropdown_id, "value"),
               Input(future_size_input_id, "value"),
               Input(top_k_input_id, "value"),
               Input(offset_checkbox_id, "value")])
def update_plot_and_table(symbol_value, window_size_value, future_size_value, top_k_value, checkbox_value):
    """Re-run the pattern search and rebuild the graph and results table.

    Fired whenever any of the settings controls change.
    """
    # RetAPI search
    ret = search_most_recent(symbol=symbol_value,
                             window_size=window_size_value,
                             top_k=top_k_value,
                             future_size=future_size_value)
    # Parse response and build the HTML table rows
    table_rows = []
    values = []
    symbols = []
    start_end_dates = []
    for i, match in enumerate(ret.matches):
        values.append(match.values)
        symbols.append(match.symbol)
        start_end_dates.append((match.start_date, match.end_date))
        # NOTE(review): end_date feeds the "Pattern Start Date" column and
        # start_date the "Pattern End Date" column; likewise values[-1] /
        # values[0] feed the Start/Future close columns. This looks reversed
        # unless match.values is ordered newest-first — confirm against the
        # search API's value ordering before changing.
        row_values = [i + 1,
                      match.distance,
                      match.symbol,
                      match.end_date,
                      match.start_date,
                      match.values[-1],
                      match.values[window_size_value - 1],
                      match.values[0]]
        row_dict = {c: v for c, v in zip(table_columns, row_values)}
        table_rows.append(row_dict)
    # Checklist value is a list; empty list means the offset box is unticked.
    offset_traces = False if len(checkbox_value) == 0 else True
    # Visualize the data on a graph
    fig = spa.visualize_graph(match_values_list=values,
                              match_symbols=symbols,
                              match_str_dates=start_end_dates,
                              window_size=window_size_value,
                              future_size=future_size_value,
                              anchor_symbol=ret.anchor_symbol,
                              anchor_values=ret.anchor_values,
                              show_legend=False,
                              offset_traces=offset_traces)
    return fig, table_rows


if __name__ == "__main__":
    # Listen on all interfaces so the app is reachable inside a container.
    app.run_server(debug=False, host="0.0.0.0")
|
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
import time
import sys
import os
import re
from .commands import Command
from .cn_introspect_bgp import ControlNodeInspect
from .ssh_interactive_commnds import *
from netaddr import *
from datetime import datetime
from pytz import timezone
import pytz
if __name__ == '__main__':
    # Create logdir if needed
    if not os.path.exists('log'):
        os.mkdir('log')
    # Logfile init
    # NOTE(review): fd is opened here but nothing is ever written to it before
    # the close at the end — confirm whether log output was intended.
    fd = open("log/get_cn_%s.log" % os.getpid(), 'w')
    # Control node info
    # NOTE(review): hard-coded lab credentials below; consider env vars/config.
    cn_ip = '10.84.7.42'
    cn_username = 'root'
    cn_password = 'c0ntrail123'
    family = 'inet.0'
    family_mcast = 'inetmcast.0'
    # Router info
    rt_ip = '10.84.7.250'
    router_username = 'root'
    router_password = 'Embe1mpls'
    # BGP Scale test info
    nh = '10.84.7.19'
    oper = 'Delete'
    # NOTE(review): immediately overwrites the previous line — 'Delete' is dead.
    oper = 'Add'
    iterations = 1
    nagents = 1
    nroutes_per_agent = 20
    # xmpp-source is required when running bgp_stress test remotely,
    # note the address must be present on the remote machine's
    # interface and pingable from the control node
    # xmpp_src='2.2.1.1'
    # Worked: 100 x 400, 400 x 40, 600 x 10, 800 x 10, 1000 x 10,
    # Router init - ssh connect and login for stats gathering
    rt = remoteCmdExecuter()
    rt.execConnect(rt_ip, router_username, router_password)
    # Control init - ssh connect and login for stats gathering
    cn_shell = remoteCmdExecuter()
    cn_shell.execConnect(cn_ip, cn_username, cn_password)
    # Init for control node introspect queries
    cn = ControlNodeInspect(cn_ip)
    instance_name = 'instance1'
    # NOTE(review): these reassign nagents/nroutes_per_agent set above — the
    # earlier values (1 / 20) are dead.
    nagents = 100
    nroutes_per_agent = 5
    xmpp_src = '2.2.1.1'
    # Testing...
    npeers = cn.get_cn_bgp_neighbor_stats_element('count', 'xmpp', 'up')
    print("no instance specified, num xmpp peers:", npeers)
    #it = 'default-domain:demo:c42_t41_block42_n1:c42_t41_block42_n1'
    #npeers = cn.get_cn_bgp_neighbor_stats_element('count', 'xmpp', 'up', it)
    #nprefixes = cn.get_cn_routing_instance_bgp_active_paths(it, family)
    #status, pending_updates = cn.get_cn_routing_instance_table_element (it, family, 'pending_updates')
    # print "INST NAME:%s num xmpp peers:%s nprefixes:%s pending updates:%s"
    # %(it, npeers, nprefixes, pending_updates)
    nblocks = 10
    ninstances = 10
    # Walk every instance of every block and dump its table statistics.
    # for i in range (1,ninstances+1):
    for j in range(1, nblocks + 1):
        for i in range(1, ninstances + 1):
            iname = 'c28_t39_block%s_n%s' % (j, i)
            # NOTE(review): the previous assignment is dead — overwritten here.
            iname = 'c42_t41_block%s_n%s' % (j, i)
            instance_name = 'default-domain:demo:%s:%s' % (iname, iname)
            npeers = int(cn.get_cn_bgp_neighbor_stats_element(
                'count', 'xmpp', 'up', instance_name))
            #prefixes, paths, primary_paths, secondary_paths and infeasible_paths
            #status, nprefixes = cn.get_cn_routing_instance_table_element (instance_name, family, 'active_paths')
            #status, nprefixes = cn.get_cn_routing_instance_table_element (instance_name, family, 'total_prefixes')
            status, nprefixes = cn.get_cn_routing_instance_table_element(
                instance_name, family, 'prefixes')
            status, paths = cn.get_cn_routing_instance_table_element(
                instance_name, family, 'paths')
            status, primary_paths = cn.get_cn_routing_instance_table_element(
                instance_name, family, 'primary_paths')
            status, secondary_paths = cn.get_cn_routing_instance_table_element(
                instance_name, family, 'secondary_paths')
            status, infeasible_paths = cn.get_cn_routing_instance_table_element(
                instance_name, family, 'infeasible_paths')
            status, pending_updates = cn.get_cn_routing_instance_table_element(
                instance_name, family, 'pending_updates')
            print("INST NAME:%s num peers:%s nprefixes:%s paths:%s primary_paths:%s secondary_paths:%s infeasible_paths:%s pending updates:%s" % (instance_name, npeers, nprefixes, paths, primary_paths, secondary_paths, infeasible_paths, pending_updates))
    # End test cleanup
    fd.close()
|
from datetime import date
from dateutil.relativedelta import relativedelta
from typing import Dict, Final
import pytest
from django.core.exceptions import ValidationError
from supply_chains.test.factories import (
SupplyChainFactory,
GovDepartmentFactory,
UserFactory,
StrategicActionFactory,
StrategicActionUpdateFactory,
)
from supply_chains.models import StrategicActionUpdate, RAGRating
# Run every test in this module against the test database.
pytestmark = pytest.mark.django_db

# NOTE(review): "Staus" is a typo for "Status"; kept as-is because the name is
# referenced throughout this module.
Staus = StrategicActionUpdate.Status

# Model field names used as keys into expected-error mappings below.
SubDate: Final = "submission_date"
Content: Final = "content"
RagRating: Final = "implementation_rag_rating"
RagReason: Final = "reason_for_delays"
CompletionDateChangeReason: Final = "reason_for_completion_date_change"
ObjectLevelError: Final = "__all__"  # Django's key for non-field errors

# Expected (placeholder) error messages per field.
ERROR_MSGS = {
    SubDate: ["Missing submission_date."],
    Content: ["Missing content."],
    RagRating: ["Missing implementation_rag_rating."],
    RagReason: ["Missing reason_for_delays."],
    CompletionDateChangeReason: ["Missing reason_for_completion_date_change."],
    ObjectLevelError: [""],
}
@pytest.fixture
def sau_stub(test_user):
    """Provide a supply chain + strategic action owned by the test user's department."""
    chain_name = "carbon"
    action_name = "Source raw packaging"
    chain = SupplyChainFactory(name=chain_name, gov_department=test_user.gov_department)
    action = StrategicActionFactory(name=action_name, supply_chain=chain)
    stub = {
        "user": test_user,
        "sc_name": chain_name,
        "sa_name": action_name,
        "sc": chain,
        "sa": action,
    }
    yield stub
class TestSAUModel:
    """Test class to focus mainly on creation of SAU objects from admin panel.

    See RT-449 for more info.
    """

    def validate(self, update, expected_errors: Dict, objects_saved: int = 0):
        """Clean and save ``update``, asserting the expected validation outcome.

        Params
        ------
        update: unsaved StrategicActionUpdate instance to validate
        expected_errors: mapping of field name -> expected error messages;
            an empty mapping means the save is expected to succeed
        objects_saved: expected number of persisted SAU rows afterwards
        """
        try:
            update.full_clean()
            update.save()
        except ValidationError as err:
            details = dict(err)
            # BUG FIX: this was ``assert all([[...], [...]])`` over two list
            # comprehensions — non-empty lists are always truthy, so the
            # assertion could never fail regardless of the errors raised.
            # Assert for real that every expected field produced an error.
            missing = [key for key in expected_errors if key not in details]
            assert not missing, f"Expected errors for {missing}, got: {details}"
            # NOTE(review): exact message equality is deliberately not enforced
            # here because the ERROR_MSGS placeholders may not match Django's
            # real messages — confirm before tightening this check.
        else:
            if expected_errors:
                pytest.fail(f"No exceptions were raised out of {expected_errors}")
        finally:
            assert StrategicActionUpdate.objects.all().count() == objects_saved

    def test_SAU_save(self, sau_stub):
        """A fully-populated submitted update saves cleanly."""
        # Arrange
        # Act
        sau = StrategicActionUpdateFactory.build(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            implementation_rag_rating=RAGRating.GREEN,
        )
        # Assert
        self.validate(sau, {}, objects_saved=1)

    def test_SAU_missing_sub_date(self, sau_stub):
        # Arrange
        # Act
        sau = StrategicActionUpdateFactory.build(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            submission_date=None,
        )
        # Assert
        self.validate(sau, {SubDate: ERROR_MSGS[SubDate]})

    def test_SAU_missing_content(self, sau_stub):
        # Arrange
        # Act
        sau = StrategicActionUpdateFactory.build(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            content=None,
        )
        # Assert
        self.validate(sau, {Content: ERROR_MSGS[Content]})

    def test_SAU_missing_RAG(self, sau_stub):
        # Arrange
        # Act
        sau = StrategicActionUpdateFactory.build(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            implementation_rag_rating=None,
        )
        # Assert
        self.validate(sau, {RagRating: ERROR_MSGS[RagRating]})

    def test_SAU_RAG_reason(self, sau_stub):
        """GREEN rating needs no delay reason."""
        # Arrange
        # Act
        sau = StrategicActionUpdateFactory.build(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            implementation_rag_rating=RAGRating.GREEN,
        )
        # Assert
        self.validate(sau, {}, objects_saved=1)

    def test_SAU_missing_RAG_reason(self, sau_stub):
        """AMBER rating requires a reason for delays."""
        # Arrange
        # Act
        sau = StrategicActionUpdateFactory.build(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            implementation_rag_rating=RAGRating.AMBER,
            reason_for_delays=None,
        )
        # Assert
        self.validate(sau, {RagReason: ERROR_MSGS[RagReason]})

    def test_SAU_missing_RAG_reason_red(self, sau_stub):
        """RED rating requires a reason for delays."""
        # Arrange
        # Act
        sau = StrategicActionUpdateFactory.build(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            implementation_rag_rating=RAGRating.RED,
            reason_for_delays=None,
        )
        # Assert
        self.validate(sau, {RagReason: ERROR_MSGS[RagReason]})

    def test_SAU_missing_completion_change_reason(self, sau_stub):
        """Changing the target completion date requires a change reason."""
        # Arrange
        # Act
        sau = StrategicActionUpdateFactory.build(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            implementation_rag_rating=RAGRating.RED,
            reason_for_delays="some text",
            changed_value_for_target_completion_date=date.today().replace(day=20)
            + relativedelta(years=1, months=1),
            reason_for_completion_date_change=None,
        )
        # Assert
        self.validate(
            sau, {CompletionDateChangeReason: ERROR_MSGS[CompletionDateChangeReason]}
        )

    def test_back_dating_update(self, sau_stub):
        """An update back-dated three months saves and lands in that month."""
        # Arrange
        # Act
        sau = StrategicActionUpdateFactory.build(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            implementation_rag_rating=RAGRating.GREEN,
            reason_for_delays="some text",
            date_created=date.today().replace(day=20) - relativedelta(months=3),
            submission_date=date.today().replace(day=20) - relativedelta(months=3),
        )
        # Assert
        self.validate(sau, {}, objects_saved=1)
        assert (
            StrategicActionUpdate.objects.given_month(
                date.today().replace(day=20) - relativedelta(months=3)
            ).count()
            == 1
        )

    def test_back_dating_update_in_gap(self, sau_stub):
        """Back-dating into a month between existing updates is allowed."""
        # Arrange
        StrategicActionUpdateFactory.create(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            implementation_rag_rating=RAGRating.GREEN,
            reason_for_delays="some text",
            date_created=date.today().replace(day=20) - relativedelta(months=4),
            submission_date=date.today().replace(day=20) - relativedelta(months=4),
        )
        StrategicActionUpdateFactory.create(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            implementation_rag_rating=RAGRating.GREEN,
            reason_for_delays="some text",
            date_created=date.today().replace(day=20) - relativedelta(months=2),
            submission_date=date.today().replace(day=20) - relativedelta(months=2),
        )
        # Act
        sau = StrategicActionUpdateFactory.build(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            implementation_rag_rating=RAGRating.GREEN,
            reason_for_delays="some text",
            date_created=date.today().replace(day=20) - relativedelta(months=3),
            submission_date=date.today().replace(day=20) - relativedelta(months=3),
        )
        # Assert
        self.validate(sau, {}, objects_saved=3)
        assert (
            StrategicActionUpdate.objects.given_month(
                date.today().replace(day=20) - relativedelta(months=3)
            ).count()
            == 1
        )

    def test_back_dating_fail(self, sau_stub):
        """A second update in an already-occupied month is rejected object-level."""
        # Arrange
        sau1 = StrategicActionUpdateFactory.create(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            implementation_rag_rating=RAGRating.GREEN,
            reason_for_delays="some text",
            date_created=date.today().replace(day=20) - relativedelta(months=3),
            submission_date=date.today().replace(day=20) - relativedelta(months=3),
        )
        # Act
        new = StrategicActionUpdateFactory.build(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.IN_PROGRESS,
            date_created=date.today().replace(day=20) - relativedelta(months=3),
        )
        # Assert
        self.validate(new, {ObjectLevelError: ""}, objects_saved=1)
        assert (
            StrategicActionUpdate.objects.given_month(
                date.today().replace(day=20) - relativedelta(months=3)
            ).count()
            == 1
        )

    def test_back_dating_change_state(self, sau_stub):
        """Submitting an in-progress back-dated update keeps it in its month."""
        # Arrange
        StrategicActionUpdateFactory.create(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.IN_PROGRESS,
            implementation_rag_rating=RAGRating.GREEN,
            date_created=date.today().replace(day=20) - relativedelta(months=3),
        )
        StrategicActionUpdateFactory.create_batch(10)
        # Act
        sau = StrategicActionUpdate.objects.get(strategic_action=sau_stub["sa"])
        sau.status = Staus.SUBMITTED
        sau.submission_date = date.today().replace(day=20) - relativedelta(months=3)
        sau.save()
        # Assert
        assert (
            StrategicActionUpdate.objects.given_month(
                date.today().replace(day=20) - relativedelta(months=3)
            ).count()
            == 1
        )

    def test_back_dating_update_with_earlier_days(self, sau_stub):
        """For RT-489, specifically"""
        # Arrange
        StrategicActionUpdateFactory.create(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            implementation_rag_rating=RAGRating.GREEN,
            reason_for_delays="some text",
            date_created=date.today().replace(day=10) - relativedelta(months=3),
            submission_date=date.today().replace(day=10) - relativedelta(months=3),
        )
        # Act
        sau = StrategicActionUpdateFactory.build(
            user=sau_stub["user"],
            strategic_action=sau_stub["sa"],
            supply_chain=sau_stub["sc"],
            status=Staus.SUBMITTED,
            implementation_rag_rating=RAGRating.GREEN,
            reason_for_delays="some text",
            date_created=date.today().replace(day=20) - relativedelta(months=3),
            submission_date=date.today().replace(day=20) - relativedelta(months=3),
        )
        # Assert
        self.validate(sau, {ObjectLevelError: ""}, objects_saved=1)
        assert (
            StrategicActionUpdate.objects.given_month(
                date.today().replace(day=20) - relativedelta(months=3)
            ).count()
            == 1
        )
|
# Globals
def init():
    """Initialise Raincoat's module-level configuration globals.

    All settings start as empty/neutral defaults; they are expected to be
    populated from the configuration file / CLI arguments after this call.
    """
    defaults = {
        "VERSION": "1.3",
        "APP_NAME": "Raincoat",
        "TORRENTS": [],
        "APIKEY": "",
        "JACKETT_URL": "",
        "JACKETT_INDEXER": "",
        "DESC_LENGTH": "",
        "EXCLUDE": "",
        "RESULTS_LIMIT": "",
        "CLIENT_URL": "",
        "DISPLAY": "",
        "TOR_CLIENT": "",
        "TOR_CLIENT_USER": "",
        "TOR_CLIENT_PW": "",
        "DOWNLOAD_DIR": "",
        "CURRENT_PAGE": 0,
        "VERBOSE_MODE": False,
        "DOWNLOAD": 0,
        "VERIFY": True,
        "TERM_FILE": None,
    }
    # Equivalent to a series of ``global NAME; NAME = value`` statements.
    globals().update(defaults)
|
#!/usr/bin/env python
import numpy
from distutils.core import setup, Extension
# C extension module wrapping the bundled transformations sources.
transformations_module = Extension(
    'pybh.contrib._transformations',
    #define_macros = [('MAJOR_VERSION', '1'),
    #                 ('MINOR_VERSION', '0')],
    include_dirs = [numpy.get_include()],
    libraries = [],
    library_dirs = [],
    sources=['pybh/contrib/transformations.c'])
# NOTE(review): distutils is deprecated (PEP 632) and removed in Python 3.12,
# and `install_requires` is a setuptools keyword that plain distutils ignores —
# consider `from setuptools import setup, Extension` instead.
setup(name='pybh',
      version='0.2',
      description='Personal python utilities',
      author='Benjamin Hepp',
      author_email='benjamin.hepp@posteo.de',
      license='BSD 3 License',
      packages=['pybh'],
      ext_modules=[transformations_module],
      install_requires=[
          'numpy',
          'Pillow',
          # Requirements for rendering
          'msgpack',
          'moderngl',
          'pyglet',
          'pyassimp',
          'pywavefront',
      ]
)
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow.compat.v1 as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import Reco3D.lib.dataset as dataset
import Reco3D.lib.network as network
from Reco3D.lib import preprocessor
import Reco3D.lib.vis as vis
from Reco3D.lib import metrics
from PIL import Image
import argparse
import time
import math
def get_args():
    """Parse and return command-line options for the reconstruction demo."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", type=str,
                        default='./models/freezed_model/epoch_299',
                        help="The model path")
    parser.add_argument("--data", type=str,
                        default='./examples/chair_g',
                        help="Example data path")
    parser.add_argument("--rnd", action='store_true', default=False,
                        help="use ShapeNet dataset")
    parser.add_argument("--test", action='store_true', default=False,
                        help="Testing with Pix3D")
    return parser.parse_args()
def load_images(img_path, n_views=1):
    """Load example .png/.jpg images from ``img_path`` and return them stacked.

    Images are downscaled (keeping aspect ratio) when the smaller side exceeds
    the network's 137px input size.
    NOTE(review): ``n_views`` is currently unused inside this function —
    confirm whether a view-count cap was intended here.
    """
    print('Loading Images at {}'.format(img_path))
    filenames = list(set(os.path.join(img_path, n) for n in os.listdir(img_path)
                         if n.endswith(".png") or n.endswith('.jpg')))
    img_data = dataset.load_imgs(filenames)
    # Resize if the input images are larger than 137px on the smaller side.
    min_size = min(np.shape(img_data[0])[:2])
    max_size = max(np.shape(img_data[0])[:2])
    if min_size > 137:
        resized = []
        for i in range(np.shape(img_data)[0]):
            img = Image.fromarray(img_data[i])
            size = math.ceil(max_size * 137 / min_size)
            # FIX: Image.ANTIALIAS was removed in Pillow 10; it has been an
            # alias of Image.LANCZOS (identical filter) since Pillow 2.7.
            img.thumbnail((size, size), Image.LANCZOS)
            resized.append(img)
        img_data = np.stack(resized)
    print("Loaded example")
    return img_data
def main():
    """Run one reconstruction with a frozen model and visualize the result."""
    args = get_args()
    model_dir = args.path.strip('/')
    image_dir = args.data
    random_data = args.rnd
    test = args.test
    nviews = 5
    print('Loading the model {}'.format(model_dir))
    net = network.Network_restored(model_dir)
    print('Loaded the model')
    # Input source: random ShapeNet sample, Pix3D test pair, or user images.
    if random_data:
        X, Y = dataset.load_random_sample()
    elif test:
        X, Y = dataset.load_random_data_Pix3D()
    else:
        X = load_images(image_dir, n_views=nviews)
    # show example image
    print('---->', X.shape)
    if len(np.shape(X)) < 4:
        vis.multichannel(X)
    else:
        vis.multichannel(X[0])
    X = preprocessor.Preprocessor_npy(np.expand_dims(X, axis=0)).out_tensor
    print(X.shape)
    # make inference (first 3 channels, at most nviews views)
    t1 = time.time()
    out = net.predict(X[:, :nviews, :, :, 0:3])
    t2 = time.time()
    print("Inference time {} sec".format(t2 - t1))
    # show ground truth when one is available
    if test or random_data:
        vis.voxel_binary(Y)
    print(np.shape(out))
    out = out[0]
    # BUG FIX: the original sliced ``out[:, :, :0]`` / ``out[:, :, :1]``, which
    # produces an empty array and a length-1 slice whose shapes cannot be
    # stacked (np.stack would raise ValueError). Select the channels instead.
    # NOTE(review): assumes the last axis holds (background, foreground)
    # scores — confirm against Network_restored's output layout.
    bg = out[..., 0]
    fg = out[..., 1]
    fg[fg < 0.3] = 0  # suppress low-confidence foreground voxels
    out = np.stack([bg, fg], axis=-1)
    vis.voxel_binary(out[0])
    plt.show()


if __name__ == '__main__':
    main()
|
#*************************************************************************
#Copyright (C) 2015 by Arash Bakhtiari
#You may not use this file except in compliance with the License.
#You obtain a copy of the License in the LICENSE file.
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#*************************************************************************
import os
import subprocess
import math
import sys
from collections import OrderedDict
import utils
def generate_command_args(tl_list, dt_list, tn_list, de_list, q_list,
                          np_list, nt_list, num_steps):
    """Build an ordered mapping {cmd_id: argv list} for the advection example.

    Each per-step list supplies one value per command: tree tolerance,
    time step, number of time steps, tree depth, Chebyshev degree,
    MPI process count, and OMP thread count respectively.
    """
    EXEC = os.path.join(utils.TBSLAS_EXAMPLES_BIN_DIR, "advection")
    cmd_args = OrderedDict()
    for counter in range(num_steps):
        # -N is rounded up to the nearest power of 8 covering np processes.
        ARGS = [
            '-N', str(8 ** math.ceil(math.log(np_list[counter], 8))),
            '-tol', str(tl_list[counter]),
            '-q', str(q_list[counter]),
            '-dt', str(dt_list[counter]),
            '-tn', str(tn_list[counter]),
            '-d', str(de_list[counter]),
            '-omp', str(nt_list[counter]),
            '-test', '2',
        ]
        cmd_args[counter + 1] = utils.determine_command_prefix(np_list[counter]) + [EXEC] + ARGS
    return cmd_args
def test1():
    """TEST 1: temporal/spatial error study for the advection example."""
    mpi_num_procs, omp_num_threads = utils.parse_args()
    num_steps = 6
    # TREE TOLERANCE (constant factor of 1 keeps it fixed across steps)
    tl_init, tl_factor = 1e-10, 1
    tl_list = [tl_init * math.pow(tl_factor, float(step)) for step in range(num_steps)]
    # TREE DEPTH
    de_list = [5, 5, 5, 7, 7, 7]
    # TIME RESOLUTION
    dt_init, dt_factor = 6.28 * 1e-2, 1
    dt_list = [dt_init * math.pow(dt_factor, float(step)) for step in range(num_steps)]
    # NUM TIME STEPS (enough for NUM_ROT full rotations)
    NUM_ROT = 5
    T_END = 6.28 * NUM_ROT
    tn_init, tn_factor = T_END / dt_init, 1
    tn_list = [tn_init * math.pow(tn_factor, float(step)) for step in range(num_steps)]
    # CHEBYSHEV DEGREE
    q_list = [3, 8, 14, 3, 8, 14]
    # NUM MPI PROCESSES / NUM OMP THREADS (same for every step)
    np_list = [mpi_num_procs] * num_steps
    nt_list = [omp_num_threads] * num_steps
    cmd_args = generate_command_args(tl_list, dt_list, tn_list, de_list,
                                     q_list, np_list, nt_list, num_steps)
    utils.execute_commands(cmd_args, 'table1-ROT' + str(NUM_ROT))
################################################################################
# MAIN
################################################################################
if __name__ == '__main__':
    # Redirect results into a timestamped subdirectory for this run, then
    # restore the original TBSLAS_RESULT_DIR afterwards.
    tbslas_dir = os.environ['TBSLAS_RESULT_DIR']
    import time
    TIMESTR = time.strftime("%Y%m%d-%H%M%S-") + str(time.time())
    os.environ['TBSLAS_RESULT_DIR'] = os.path.join(tbslas_dir, 'conv-zalesak-' + TIMESTR)
    if not os.path.exists(os.environ['TBSLAS_RESULT_DIR']):
        os.makedirs(os.environ['TBSLAS_RESULT_DIR'])
    test1()
    os.environ['TBSLAS_RESULT_DIR'] = tbslas_dir
|
#!/usr/bin/env python
import os
import csv
import sys
# Emit a photo_id,image CSV (to stdout) mapping each backing image file to the
# photo id derived from its basename.
writer = csv.DictWriter(sys.stdout, fieldnames=['photo_id', 'image'])
writer.writeheader()
# FIX: open the URL list with a context manager so the handle is closed
# deterministically (the original left the file open).
with open('ocr/backing-images.urls.txt') as url_file:
    for line in url_file:
        # Each line is "<image path>\t<url>"; the URL column is unused here.
        image_file, _url = line.strip().split('\t')
        photo_id, _ = os.path.splitext(os.path.basename(image_file))
        writer.writerow({'photo_id': photo_id, 'image': image_file})
|
from django.test import TestCase
from pages import display, models
class Fields(TestCase):
    """Attribute access behaviour of display.Page wrappers."""

    fixtures = []

    def test_attribute_error(self):
        """Unknown attributes raise AttributeError rather than returning None."""
        # noinspection PyTypeChecker
        blank_page = display.Page(None, {})
        with self.assertRaises(AttributeError):
            getattr(blank_page, 'bad_attr')
class Page(TestCase):
    """Tests for Page.display rendering with and without a DB template."""

    def test_plain_field(self):
        # Without a template the raw field value is exposed as-is.
        page = models.Page.objects.create(
            name='some page', slug='test', h1='test h1'
        )
        self.assertEqual(page.display.h1, 'test h1')

    def test_db_template(self):
        # Template fields are rendered with the page available in the context.
        template = models.PageTemplate.objects.create(
            name='test',
            h1='{{ page.name }} - купить в СПб',
        )
        page = models.Page.objects.create(
            name='different page', template=template
        )
        self.assertEqual(page.display.h1, 'different page - купить в СПб')

    def test_context_setter(self):
        template = models.PageTemplate.objects.create(
            name='test',
            h1='{{ some_field }}',
        )
        page = models.Page.objects.create(
            name='different page', template=template
        )
        # Assigning a dict to ``display`` supplies the rendering context.
        page.display = {'some_field': 'some_value'}
        self.assertEqual(page.display.h1, 'some_value')

    def test_attribute_uses_template(self):
        # Page's own field value is substituted into the template expression.
        template = models.PageTemplate.objects.create(
            name='test',
            h1='{{ page.h1 }} - template',
        )
        page = models.Page.objects.create(
            name='different page',
            h1='page h1',
            template=template,
        )
        self.assertEqual(page.display.h1, 'page h1 - template')

    def test_has_unique_context(self):
        """Two different pages should contain not overlapping display contexts."""
        left_template = models.PageTemplate.objects.create(name='left', h1='{{ tag }}')
        right_template = models.PageTemplate.objects.create(name='right', h1='{{ tag }}')
        left = models.Page.objects.create(name='left', template=left_template)
        right = models.Page.objects.create(name='right', template=right_template)
        left.template.h1, right.template.h1 = '{{ tag }}', '{{ tag }}'
        left.display, right.display = {'tag': 'A'}, {'tag': 'B'}
        self.assertNotEqual(left.display.h1, right.display.h1)

    def test_has_unique_template(self):
        """Two different pages should contain not overlapping display contexts."""
        left_template = models.PageTemplate.objects.create(name='left', h1='{{ tag }}')
        right_template = models.PageTemplate.objects.create(
            name='right', h1='different {{ tag }}'
        )
        left = models.Page.objects.create(name='left', template=left_template)
        right = models.Page.objects.create(name='right', template=right_template)
        left.template.h1 = '{{ tag }}'
        right.template.h1 = 'different {{ tag }}'
        left.display, right.display = {'tag': 'A'}, {'tag': 'A'}
        self.assertNotEqual(left.display.h1, right.display.h1)
|
class Dummy:
    """Trivial demo object whose string form is the literal 'dummy'."""

    def __str__(self) -> str:
        return "dummy"

    def __repr__(self) -> str:
        # Added for debuggability (backward-compatible); str() is unchanged.
        return f"{type(self).__name__}()"


d1 = Dummy()
print(d1)
|
# decomposer.py
# ALS 2017/06/01
import os
from ..obsobj import Imager
from .. import spector
from .. import imgdownload
from ..filters import surveysetup
class Decomposer(Imager):
    def __init__(self, **kwargs):
        """
        Decomposer, an imager operator to do image decomposition

        Imager Params
        -------------
        /either
            obj (object of class obsobj): with attributes ra, dec, dir_obj
        /or
            ra (float)
            dec (float)
            /either
                dir_obj (string)
            /or
                dir_parent (string): attr dir_obj is set to dir_parent+'SDSSJXXXX+XXXX/'

        survey (str):
            survey of the photometric system
            if not provided, use self.obj.survey. Raise exception if self.obj.survey does not exist.
        z (float):
            redshift, if not provided, use self.obj.z or self.obj.sdss.z. It does not automatically query sdss to get z. If nothing is specified then set to -1.
        center_mode='n/2' (str):
            how is image center defined in case of even n, 'n/2' or 'n/2-1'. Should be set to n/2-1 if the image is downloaded from HSC quarry.

        Imager Attributes
        -----------------
        obj (instance of objObj)
        ra (float)
        dec (float)
        dir_obj (string)
        survey (str): e.g., 'hsc'
            survey of the photometric system
        z (float):
            redshift
        pixsize (astropy angle quantity):
            in unit of arcsec
        pixelscale (astropy pixscale quantity):
            for pixel and arcsec conversion

        Decomposer Attributes
        ---------------------
        bands (list):
            e.g., ['g', 'r', 'i', 'z', 'y'] for survey = 'hsc'
        """
        super(Decomposer, self).__init__(**kwargs)
        # Bands of this photometric survey, from the shared survey setup table.
        self.bands = surveysetup.surveybands[self.survey]
def get_fp_stamp_psfmatched(self, band, bandto):
return self.dir_obj+'stamp-{0}_psfmt-{1}.fits'.format(band, bandto)
def get_fp_stamp_contsub(self, band, bandconti):
return self.dir_obj+'stamp-{0}_contsub-{1}.fits'.format(band, bandconti)
def make_stamp_linemap_I(self, bandline, bandconti, line='OIII5008', overwrite=False):
"""
make stamp of line map in rest frame intensity in units of [erg s-1 cm-2 arcsec-2]
Converted from stamp_linemap depending on self.z. Ready for isophotal measurements.
See make_stamp_linemap for details.
"""
raise NotImplementedError("Subclass must implement abstract method")
def make_stamp_linemap(self, bandline, bandconti, line='OIII5008', overwrite=False):
"""
make stamp of line map in observed frame flux in units of [erg s-1 cm-2]
Params
------
self
bandline (str)
bandconti (str)
line = 'OIII5008' (str)
overwrite = False (bool)
Return
------
status (bool)
Write Output
------------
e.g., stamp-OIII5008.fits
"""
raise NotImplementedError("Subclass must implement abstract method")
def make_stamp_contsub(self, band, bandconti, overwrite=True):
"""
make stamp that is continuum subtracted
Params
------
self
band (str)
bandconti (str)
overwrite=False
Return
------
status
Write Output
------------
e.g., stamp-i_contsub-z.fits (if band = 'i', bandto = 'z')
"""
raise NotImplementedError("Subclass must implement abstract method")
def make_stamp_psfmatch(self, band, bandto, overwrite=True):
"""
make stamp that has psf matched to stamp of another band
Params
------
self
band (str)
bandto (str)
overwrite=False
Return
------
status
Write Output (e.g., if band = 'i', bandto = 'z')
------------
stamp-i_psfmatched-z.fits
(possibly others)
"""
raise NotImplementedError("Subclass must implement abstract method")
def _get_conti_fnu_ratio_from_spector(self, band1, band2):
""" return fnu_band1 / fnu_band2 of the continuum from spector """
s = self._get_spector()
ratio = s.get_fnu_ratio_band1_over_band2(band1=band1, band2=band2, component='contextrp')
return ratio
def _get_spector(self):
s = spector.Spector(obj=self.obj, survey=self.survey, z=self.z)
return s
|
from nltk.tag import pos_tag
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import random
from NLPFunctions import remove_noise, get_sentence_for_model
# One pandas Series of raw sentences per emotion category.
# NOTE(review): paths are relative to the working directory — run from the
# project root or these reads will fail.
dataset_Happy = pd.read_csv('Dataset/EmotionalHappy.csv')['Sentence']
dataset_Anger = pd.read_csv('Dataset/EmotionalAnger.csv')['Sentence']
dataset_Disgust = pd.read_csv('Dataset/EmotionalDisgust.csv')['Sentence']
dataset_Sad = pd.read_csv('Dataset/EmotionalSad.csv')['Sentence']
dataset_Shame = pd.read_csv('Dataset/EmotionalShame.csv')['Sentence']
dataset_Surprise = pd.read_csv('Dataset/EmotionalSurprise.csv')['Sentence']
# English stopword list used when cleaning tokens (requires the NLTK
# 'stopwords' corpus to be downloaded).
stop_words = stopwords.words('english')
def dataProcessing(train_size=1300):
    """Build a shuffled, labelled training set from the six emotion datasets.

    Each sentence is tokenized, cleaned of noise/stopwords with
    ``remove_noise``, converted to the token-dict format expected by an NLTK
    classifier via ``get_sentence_for_model``, and paired with its emotion
    label. The combined dataset is shuffled and truncated.

    Parameters
    ----------
    train_size : int, optional
        Number of labelled examples to return. Defaults to 1300, the split
        size previously hard-coded.

    Returns
    -------
    list of (dict, str)
        Shuffled ``(token_dict, emotion_label)`` pairs.
    """
    # The original code repeated the identical pipeline six times, once per
    # emotion; drive it from a table instead.
    labelled_sources = [
        (dataset_Happy, "Happy"),
        (dataset_Anger, "Anger"),
        (dataset_Disgust, "Disgust"),
        (dataset_Sad, "Sad"),
        (dataset_Shame, "Shame"),
        (dataset_Surprise, "Surprise"),
    ]
    dataset = []
    for sentences, label in labelled_sources:
        # tokenize -> remove noise/stopwords, one cleaned token list
        # per sentence.
        clean_token_lists = [remove_noise(word_tokenize(sentence), stop_words)
                             for sentence in sentences]
        # Convert to classifier-ready dicts and attach the emotion label.
        dataset.extend((token_dict, label)
                       for token_dict in get_sentence_for_model(clean_token_lists))
    random.shuffle(dataset)
    return dataset[:train_size]
dataProcessing() |
from flask_restful import Resource, Api, reqparse
import json
from model.motion import MotionModel
class MotionAPI(Resource):
    """REST resource exposing motion events backed by ``MotionModel``."""

    def get(self):
        """Fetch the stored motion data and return it with HTTP 200."""
        store = MotionModel()
        return store.get_motion(), 200

    def post(self):
        """
        Uses the houseguard db to make a motion
        :return:
        """
        # Persist a new motion event, then acknowledge.
        MotionModel().create_motion()
        return 'Complete', 200
import os
import sys
# Resolve the utility package relative to this file, not the current working
# directory. The original appended the relative string '../utility' and left
# `current_dir` unused, so imports broke when the script was launched from
# anywhere but its own directory.
current_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(current_dir, '..', 'utility'))
from TimeUtility import TimeUtility
import MetaTrader5 as mt5
# Time-unit labels stored as the third element of each TIMEFRAME entry.
MINUTE = 'MINUTE'
HOUR = 'HOUR'
DAY = 'DAY'
# symbol : [(mt5 timeframe constants), number, unit]
# e.g. 'M5' is a 5-minute bar, 'H4' a 4-hour bar, 'D1' a daily bar.
TIMEFRAME = {'M1': [mt5.TIMEFRAME_M1, 1, MINUTE],
             'M5': [mt5.TIMEFRAME_M5, 5, MINUTE],
             'M10': [mt5.TIMEFRAME_M10, 10, MINUTE],
             'M15': [mt5.TIMEFRAME_M15, 15, MINUTE],
             'M30': [mt5.TIMEFRAME_M30, 30, MINUTE],
             'H1': [mt5.TIMEFRAME_H1 , 1, HOUR],
             'H4': [mt5.TIMEFRAME_H4, 4, HOUR],
             'H8': [mt5.TIMEFRAME_H8, 8, HOUR],
             'D1': [mt5.TIMEFRAME_D1, 1, DAY]}
class Timeframe:
    """Wrapper around one TIMEFRAME table entry, keyed by symbol (e.g. 'M5')."""

    def __init__(self, symbol):
        # Symbols are stored upper-case in the table; normalize the input.
        self.symbol = symbol.upper()
        self.values = TIMEFRAME[self.symbol]

    @property
    def constant(self):
        """The MetaTrader5 timeframe constant for this entry."""
        return self.values[0]

    @property
    def value(self):
        """Numeric bar length, expressed in `unit` units."""
        return self.values[1]

    @property
    def unit(self):
        """One of MINUTE, HOUR or DAY."""
        return self.values[2]

    @property
    def isDay(self):
        """True when this timeframe is day-based."""
        return self.unit == DAY

    @property
    def isHour(self):
        """True when this timeframe is hour-based."""
        return self.unit == HOUR

    @property
    def isMinute(self):
        """True when this timeframe is minute-based."""
        return self.unit == MINUTE

    def deltaTime(self, multiply=1.0):
        """Return a time delta spanning `multiply` bars of this timeframe."""
        if self.unit == MINUTE:
            return TimeUtility.deltaSecond(multiply * self.value * 60)
        if self.unit == HOUR:
            return TimeUtility.deltaMinute(multiply * self.value * 60)
        if self.unit == DAY:
            return TimeUtility.deltaHour(multiply * self.value * 24)

    @property
    def symbols(self):
        """All known timeframe symbols."""
        return list(TIMEFRAME.keys())

    @classmethod
    def timeframes(cls):
        """Return one Timeframe instance per known symbol."""
        return [cls(name) for name in TIMEFRAME.keys()]

    @classmethod
    def load(cls, timeframe_constant):
        """Return the Timeframe matching an mt5 constant, or None if unknown."""
        for name, entry in TIMEFRAME.items():
            if entry[0] == timeframe_constant:
                return cls(name)
        return None
"""
Model observing, this module is built to simulate actual observing. The
object is known, and given sight parameters, the data is given. In particular,
these functions actually give the values of terms derived from the object
model also provided.
"""
import copy
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
import astropy as ap
import astropy.units as ap_u
import astropy.coordinates as ap_coord
import Robustness as Robust
import Backend as _Backend
import data_systematization as d_systize
class Sightline():
    """A single line of sight through space, fixed by its RA and DEC.

    The accepted notation for RA and DEC values is that of the Astropy
    :py:class:`~.astropy.coordinates.SkyCoord` class.

    Attributes
    ----------
    coordinates : Astropy :py:class:`~.astropy.coordinates.SkyCoord` object.
        The sky coordinates of the sightline.

    Methods
    -------
    sightline_parameters() : function (returns | ndarray,ndarray)
        Returns the sightline's center and slopes for a geometrical
        representation of the line, converting from the equatorial to the
        cartesian coordinate system.
    """

    def __init__(self, right_ascension, declination,
                 SkyCoord_object=None):
        """Create a sightline from RA/DEC strings or a ready-made SkyCoord.

        A thin wrapper around :py:class:`~.astropy.coordinates.SkyCoord`;
        the sightline is defined purely by where it points.

        Arguments
        ---------
        right_ascension : string
            RA in SkyCoord format ``00h00m00.00s``; the seconds are decimal
            and may carry any precision.
        declination : string
            DEC in SkyCoord format ``±00d00m00.00s``; the seconds are
            decimal and may carry any precision.
        SkyCoord_object : :py:class:`~.astropy.coordinates.SkyCoord` object; optional
            If a valid SkyCoord is supplied it is used directly and the two
            string arguments are ignored.
        """
        if (isinstance(SkyCoord_object, ap_coord.SkyCoord)):
            # A ready-made SkyCoord wins over the string arguments.
            sky_coordinates = SkyCoord_object
        else:
            # Validate the strings before handing them to Astropy.
            checked_ra = Robust.valid.validate_string(right_ascension)
            checked_dec = Robust.valid.validate_string(declination)
            sky_coordinates = ap_coord.SkyCoord(checked_ra, checked_dec,
                                                frame='icrs')
        # Radian forms of the angles drive the automatic RA wrap-angle
        # computation.
        ra_radians = float(sky_coordinates.ra.hour * (np.pi / 12))
        dec_radians = float(sky_coordinates.dec.radian)
        wrap_angle = _Backend.astrcoord.auto_ra_wrap_angle(ra_radians)
        # Member assignment; Astropy requires a unit on the wrap angle.
        self.coordinates = sky_coordinates
        self._ra_wrap_angle = wrap_angle * ap_u.rad

    def sightline_parameters(self):
        """Return the sightline's linear parameters.

        The sightline is by definition parallel to the x-axis of the object
        to be observed; the plane of the sky is the object's yz-plane. The
        central defining point is returned first, then the slope deltas.

        Returns
        -------
        sightline_center : ndarray
            Cartesian point under the approximation that, if the x-axis and
            r-axis of cartesian and spherical coordinates coincide, so do
            the yz-plane and the theta-phi plane.
        sightline_slopes : ndarray
            Slopes of the cartesian point values given by the center; by
            the approximation above, always [1,0,0].

        Notes
        -----
        Object-frame axes relative to the telescope:
        - x-axis equals the telescope's r-axis, pointing away from the
          telescope, deeper into space.
        - y-axis equals the RA-axis/phi-axis, westward (as y increases,
          RA decreases).
        - z-axis equals the DEC-axis, and the negative theta-axis centered
          on theta = pi/2; points north-south of the telescope.
        """
        ra_radians, dec_radians = self._radianize_coordinates()
        center = np.array([0, ra_radians, dec_radians])
        slopes = np.array([1, 0, 0])
        return center, slopes

    def _radianize_coordinates(self):
        """Return the RA and DEC in radians.

        Returns
        -------
        ra_radians : float
            The RA coordinate in radians.
        dec_radians : float
            The DEC coordinate in radians.
        """
        # Apply the stored wrap angle first; Astropy requires a unit.
        self.coordinates.ra.wrap_angle = self._ra_wrap_angle
        return (float(self.coordinates.ra.hour * (np.pi / 12)),
                float(self.coordinates.dec.radian))
class ProtostarModel():
    """
    This is an object that represents a model of an object in space. It
    contains all the required functions and parameters associated with
    one of the objects that would be observed for polarimetry data.

    Attributes
    ----------
    self.coordinates : Astropy :py:class:`~.astropy.coordinates.SkyCoord` object
        This is the coordinates of the object that this class defines.
    self.cloud_model : function
        This is an implicit function (or a numerical approximation thereof) of
        the shape of the protostar cloud.
    self.magnetic_field : function
        This is an implicit function (or a numerical approximation thereof) of
        the shape of the magnetic field.
    self.density_model : function
        This is an implicit function (or a numerical approximation thereof) of
        the shape of the density model of the cloud.
    self.polarization_model : function
        This is an implicit function (or a numerical approximation thereof) of
        the polarization model of the cloud.
    """
    def __init__(self, coordinates, cloud_model, magnetic_field_model,
                 density_model=None, polarization_model=None, zeros_guess_count=100):
        """Object form of a model object to be observed.

        This is the object representation of an object in the sky. The
        required terms are present.

        Arguments
        ---------
        coordinates : Astropy :py:class:`~.astropy.coordinates.SkyCoord` object
            These are the coordinates of the observation object. It is up
            to the user to put as complete information as possible.
        cloud_model : function or string,
            An implicit equation of the cloud. The origin of this equation
            must also be the coordinate specified by self.coordinates. Must
            be cartesian in the form ``f(x,y,z) = 0``, for the function or
            string is ``f(x,y,z)``. The x-axis is always aligned with a
            telescope as it is the same as a telescope's r-axis.
        magnetic_field_model : function or :py:class:`~.InterpolationTable`
            A function that, given a single point in cartesian space, will
            return the value of the magnitude of the magnetic field's three
            orthogonal vectors in xyz-space. If an interpolation table is
            given, a numerical approximation function will be used instead.
        density_model : function or string, or :py:class:`~.InterpolationTable`; optional
            A function that, given a point in cartesian space, will return
            a value pertaining to the density of the gas/dust within at that
            point. Defaults to uniform. If an interpolation table is
            given, a numerical approximation function will be used instead.
        polarization_model: function, string, float or :py:class:`~.InterpolationTable`; optional
            This is the percent of polarization of the light. Either given as
            a function (or string representing a function) ``f(x,y,z)``, or
            as a constant float value. Default is uniform value of 1. If an
            interpolation table is given, a numerical approximation function
            will be used instead.

        Parameters
        ----------
        zeros_guess_count : int; optional
            This value stipulates how many spread out test points there should
            be when finding sightline intersection points. A higher number
            should be used for complex shapes. Defaults at 100.
        """
        # Initialization of boolean checks.
        # Check if the user input a interpolated data table instead of a
        # function. The integration method must change if so.
        input_interpolated_tables = False
        # Type check
        if (not isinstance(coordinates, ap_coord.SkyCoord)):
            raise TypeError('The input for coordinates must be an Astropy '
                            'SkyCord object.'
                            ' --Kyubey')
        # Validate or parse the cloud model into a callable f(x, y, z).
        if (callable(cloud_model)):
            cloud_model = \
                Robust.valid.validate_function_call(cloud_model,
                                                    n_parameters=3)
        elif (isinstance(cloud_model, str)):
            cloud_model = \
                Robust.inparse.user_equation_parse(cloud_model,
                                                   ('x', 'y', 'z'))
        else:
            raise TypeError('The input for the cloud equation must either '
                            'be a callable function or a string that can '
                            'be converted into an implicit callable function.'
                            ' --Kyubey')
        # Test magnetic field model.
        # NOTE(review): unlike the cloud/density/polarization branches, there
        # is no final `else: raise TypeError` here, so an invalid magnetic
        # field model passes through silently — confirm this is intended.
        if (callable(magnetic_field_model)):
            magnetic_field_model = \
                Robust.valid.validate_function_call(magnetic_field_model,
                                                    n_parameters=3)
        elif (isinstance(magnetic_field_model, d_systize.InterpolationTable)):
            # The user has inputted an interpolation table, record such.
            input_interpolated_tables = True
            if (magnetic_field_model.classification == 'vector'):
                magnetic_field_model = \
                    magnetic_field_model.numerical_function()
            else:
                raise TypeError('The magnetic field lookup table must be a '
                                'vector based table. It is currently a '
                                '< {tb} > based table.'
                                ' --Kyubey'
                                .format(tb=magnetic_field_model.classification))
        # Test density model.
        if (callable(density_model)):
            density_model = \
                Robust.valid.validate_function_call(density_model,
                                                    n_parameters=3)
        elif (isinstance(density_model, str)):
            density_model = \
                Robust.inparse.user_equation_parse(density_model,
                                                   ('x', 'y', 'z'))
        elif (isinstance(density_model, d_systize.InterpolationTable)):
            # The user has inputted an interpolation table, record such.
            input_interpolated_tables = True
            if (density_model.classification == 'scalar'):
                density_model = density_model.numerical_function()
            else:
                raise TypeError('The density model lookup table must be a '
                                'scalar based table. It is currently a '
                                '< {tb} > based table.'
                                ' --Kyubey'
                                .format(tb=density_model.classification))
        elif (density_model is None):
            # The user likely did not input a density model, the default
            # is uniform distribution.
            def uniform_density_function(x, y, z): return np.ones_like(x)
            density_model = uniform_density_function
        else:
            raise TypeError('The input for the density equation must either '
                            'be a callable function or a string that can '
                            'be converted into an implicit callable function.'
                            ' --Kyubey')
        # Test polarization model factor
        if (callable(polarization_model)):
            polarization_model = \
                Robust.valid.validate_function_call(polarization_model,
                                                    n_parameters=3)
        elif (isinstance(polarization_model, str)):
            polarization_model = \
                Robust.inparse.user_equation_parse(polarization_model,
                                                   ('x', 'y', 'z'))
        elif (isinstance(polarization_model, (float, int))):
            percent_polarized = float(copy.deepcopy(polarization_model))
            # The user desires a constant value for the percent polarization.
            def constant_function(x, y, z):
                return np.full_like(x, percent_polarized)
            polarization_model = constant_function
        elif (isinstance(polarization_model, d_systize.InterpolationTable)):
            # The user has inputted an interpolation table, record such.
            input_interpolated_tables = True
            if (polarization_model.classification == 'scalar'):
                polarization_model = polarization_model.numerical_function()
            else:
                raise TypeError('The polarization model lookup table must be '
                                'a scalar based table. It is currently a '
                                '< {tb} > based table.'
                                ' --Kyubey'
                                .format(tb=polarization_model.classification))
        elif (polarization_model is None):
            # The user likely did not input a density model, the default
            # is uniform total distribution.
            def uniform_polarization_function(x, y, z): return np.ones_like(x)
            polarization_model = uniform_polarization_function
        else:
            raise TypeError('The input for the polarization model must either '
                            'be a callable function, a string that can '
                            'be converted into an implicit callable function,'
                            'or a constant float/int value.'
                            ' --Kyubey')
        zeros_guess_count = Robust.valid.validate_int_value(zeros_guess_count,
                                                            greater_than=0)
        # Automatically calculate the wrap angle along with the radian version
        # of the angles.
        ra_radians = float(coordinates.ra.hour * (np.pi / 12))
        dec_radians = float(coordinates.dec.radian)
        ra_wrap_angle = \
            _Backend.astrcoord.auto_ra_wrap_angle(ra_radians) * ap_u.rad
        # All models equations must be offset by the coordinates. This
        # transformation assumes the flat approximation of the astronomical
        # sky.
        coordinates.ra.wrap_angle = ra_wrap_angle
        # Translate the cloud model function.
        def translate_cloud_model(x, y, z):
            return cloud_model(x, y - ra_radians, z - dec_radians)
        # Translate the magnetic field function.
        def translate_magnetic_field(x, y, z):
            return magnetic_field_model(x, y - ra_radians, z - dec_radians)
        # Translate the density model function.
        def translate_density_model(x, y, z):
            return density_model(x, y - ra_radians, z - dec_radians)
        # Translate the polarization model function.
        def translate_polarization_model(x, y, z):
            return polarization_model(x, y - ra_radians, z - dec_radians)
        # Member assignment: the public model attributes are the translated
        # closures, not the raw user inputs.
        self.coordinates = coordinates
        self.cloud_model = translate_cloud_model
        self.magnetic_field = translate_magnetic_field
        self.density_model = translate_density_model
        self.polarization_model = translate_polarization_model
        self._ra_wrap_angle = ra_wrap_angle
        self._interpolated_tables = input_interpolated_tables
        self._zeros_guess_count = zeros_guess_count

    def _radianize_coordinates(self):
        """This method returns the RA and DEC in radians.

        This method converts the RA and DEC coordinate measurements into
        radians for better accounting.

        Returns
        --------
        ra_radians : float
            The RA coordinate in radians.
        dec_radians : float
            The DEC coordinate in radians.
        """
        # Change the wrapping location if necessary. Astropy requires a unit.
        self.coordinates.ra.wrap_angle = self._ra_wrap_angle
        ra_radians = float(self.coordinates.ra.hour * (np.pi / 12))
        dec_radians = float(self.coordinates.dec.radian)
        return ra_radians, dec_radians
class ObservingRun():
"""Execute a mock observing run of an object.
This class is the main model observations of an object. Taking a
central sightline and the field of view, it then gives back a set of
plots, similar to those that an observer would see after data reduction.
The class itself does the computation in its methods, returning back
a heatmap/contour object plot from the observing depending on the method.
Attributes
----------
self.observe_target : :py:class:`ProtostarModel` object
The model target for simulated observing. Conceptually, the object
that the telescope observes.
self.sightline : :py:class:`Sightline` object
The primary sightline that is used for the model observing,
conceptually where the telescope is aimed at.
self.field_of_view : float
The field of view value of the observation, given as the length
of the observing chart.
Methods
-------
Stokes_parameter_contours() : function {returns | ndarray,ndarray}
Compute the value of Stoke parameters at random sightlines from the
primary sightline and plot them. Returns the values that was used
to plot.
"""
def __init__(self, observe_target, sightline, field_of_view):
    """Doing an observing run.

    Create an observing run object, compiling the primary sightline and
    the field of view.

    Arguments
    ---------
    observe_target : :py:class:`ProtostarModel` object
        This is the object to be observed.
    sightline : Sightline object
        This is the primary sightline, in essence, where the telescope
        is pointing in this simulation.
    field_of_view : float
        The width of the sky segment that is being observed. Must be in
        radians. Applies to both RA and DEC evenly for a square image.
        Seen range is `` (RA,DEC) ± field_of_view/2 ``.

    Raises
    ------
    TypeError
        If the target or sightline is not of the expected class.
    Robust.AstronomyError
        If the target does not lie within the field of view.
    """
    # Basic type checking
    if (not isinstance(observe_target, ProtostarModel)):
        raise TypeError('The observed target must be a ProtostarModel '
                        'class object.'
                        ' --Kyubey')
    if (not isinstance(sightline, Sightline)):
        raise TypeError('The sightline must be a Sightline class object.'
                        ' --Kyubey')
    field_of_view = Robust.valid.validate_float_value(field_of_view,
                                                      greater_than=0)
    # Check if both objects have the same RA wraping angle. If not, then
    # it is highly likely that the mapping will be incorrect.
    if (observe_target._ra_wrap_angle != sightline._ra_wrap_angle):
        Robust.kyubey_warning(Robust.AstronomyWarning,
                              ('The RA wrapping angle for both objects '
                               'are different. This may result in '
                               'improper mapping during computations.'))
    # Check if the object is actually within the field of view. The
    # original `if in-range: pass / else: raise` is inverted into a
    # single guard, and field_of_view/2 is computed once.
    obs_target_ra_radians, obs_target_dec_radians = \
        observe_target._radianize_coordinates()
    sightline_ra_radians, sightline_dec_radians = \
        sightline._radianize_coordinates()
    half_view = field_of_view / 2
    within_ra = ((sightline_ra_radians - half_view)
                 <= obs_target_ra_radians <=
                 (sightline_ra_radians + half_view))
    within_dec = ((sightline_dec_radians - half_view)
                  <= obs_target_dec_radians <=
                  (sightline_dec_radians + half_view))
    if not (within_ra and within_dec):
        raise Robust.AstronomyError('Object is not within the sightline '
                                    'and field of view. Please revise. '
                                    ' --Kyubey')
    # Assign and create.
    self.target = observe_target
    self.sightline = sightline
    self.offset = half_view
def Stokes_parameter_contours(self,
                              plot_parameters=True, n_axial_samples=25):
    """This function produces a contour plot of the stoke values.

    This function generates a large number of random sightlines to
    trace out contour information of the fields. From there, it creates
    and returns a contour plot.

    The values of the intensity, I, the two polarization values, Q,U, and
    the polarization intensity, hypt(Q,U) is plotted.

    Parameters
    ----------
    plot_parameters : bool; optional
        A boolean value to specify if the user wanted the parameters to be
        plotted.
    n_axial_samples : int; optional
        The number of points along one RA or DEC axis to be sampled. The
        resulting sample is a mesh n**2 between the bounds. Default is 25.

    Returns
    -------
    ra_dec_array : tuple(ndarray)
        This is a tuple of the values of the RA and DEC of the random
        sightlines (arranged in parallel arrays).
    stokes_parameters : tuple(ndarray)
        This is a tuple of ndarrays of the stoke parameters calculated by
        the random sightlines.
    """
    # Type check
    n_axial_samples = Robust.valid.validate_int_value(n_axial_samples,
                                                      greater_than=0)
    # Make a plotting background: one row of five shared-axis panels.
    fig1, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(1, 5,
                                                   figsize=(15, 2), dpi=100,
                                                   sharex=True, sharey=True)
    # Extract Stokes parameter data.
    stokes_parameters, ra_dec_array, _ = \
        self._Stoke_parameters(n_axial_samples)
    # Decompose the stokes parameters into I,Q,U,V along with the angle
    # of polarization. (V is unpacked but not plotted.)
    I, Q, U, V = stokes_parameters
    polar_I = np.hypot(Q, U)
    angle = _Backend.efp.angle_from_Stokes_parameters(Q, U)
    # Double check if the user actually wanted them plotted.
    if (plot_parameters):
        # Arrange the values into plottable values. The x-axis is RA, and
        # the y-axis is DEC.
        x_axis_plot = ra_dec_array[0]
        y_axis_plot = ra_dec_array[1]
        # Color maps, each gets their own special-ish kind of color map
        # depending on the plot. Diverging maps are re-centered on zero.
        intensity_maps = mpl.cm.get_cmap('inferno')
        seismic_map = mpl.cm.get_cmap('seismic')
        Q_polarization_map = \
            _Backend.pltcust.zeroedColorMap(seismic_map, Q.min(), Q.max())
        U_polarization_map = \
            _Backend.pltcust.zeroedColorMap(seismic_map, U.min(), U.max())
        PuOr_map = mpl.cm.get_cmap('PuOr')
        angle_map = \
            _Backend.pltcust.zeroedColorMap(PuOr_map,
                                            angle.min(), angle.max())
        # Extrapolate and plot a contour based on irregularly spaced data.
        ax1_o = ax1.tricontourf(x_axis_plot, y_axis_plot, I, 50,
                                cmap=intensity_maps)
        ax2_o = ax2.tricontourf(x_axis_plot, y_axis_plot, polar_I, 50,
                                cmap=intensity_maps)
        ax3_o = ax3.tricontourf(x_axis_plot, y_axis_plot, Q, 50,
                                cmap=Q_polarization_map)
        ax4_o = ax4.tricontourf(x_axis_plot, y_axis_plot, U, 50,
                                cmap=U_polarization_map)
        ax5_o = ax5.tricontourf(x_axis_plot, y_axis_plot, angle, 50,
                                cmap=angle_map)
        # Assign titles.
        ax1.set_title('Total Intensity')
        ax2.set_title('Polar Intensity')
        ax3.set_title('Q Values')
        ax4.set_title('U Values')
        ax5.set_title('Angle')
        # Assign a color bar legends
        fig1.colorbar(ax1_o, ax=ax1)
        fig1.colorbar(ax2_o, ax=ax2)
        fig1.colorbar(ax3_o, ax=ax3)
        fig1.colorbar(ax4_o, ax=ax4)
        fig1.colorbar(ax5_o, ax=ax5)
        plt.show()
    # Just in case they want to play with the data.
    return ra_dec_array, stokes_parameters
def _compute_integrated_intensity(self, sightline):
    """Computes the total strength of the light/E-field.

    Given a sightline independent of the primary one, this function
    computes the integrated value of the magnitude of the E-field. It
    is assumed that the magnitude of the E-field is directly related to
    energy given by the Poynting vector.

    Parameters
    ----------
    sightline : :py:class:`Sightline` object
        The sightline through which the intensity will be calculated
        through, using the density function.

    Returns
    -------
    integrated_intensity : float
        The total integrated intensity.
    polarized_integrated_intensity : float
        The total integrated intensity from polarization contribution,
        given by the polarization model function.
    error : float
        The error of the integrated intensity.
    """
    # Basic type checking.
    if (not isinstance(sightline, Sightline)):
        raise TypeError('The sightline must be a sightline object.'
                        ' --Kyubey')
    # Extract information about the target. The coefficient is rather
    # arbitrary.
    box_width = 10 * self.offset
    # Extract sightline information
    sightline_center, sightline_slopes = sightline.sightline_parameters()
    # If the Protostar model contains an interpolation table instead of
    # a normal function. Assume the usage of a Simpson's integration.
    if (self.target._interpolated_tables):
        integral_method = 'simpsons'
    else:
        integral_method = 'scipy'
    # Integration function with a polarization dependence, as the amount of
    # polarization influences. The polarization model must be sqrt(f(x))
    # because the user expects a I_p = I_t * p, while the most efficient
    # method of implementation (modifying the E-fields), produces a
    # relationship of I_p = I_t * p**2.
    def total_intensity(x, y, z):
        total = self.target.density_model(x, y, z)
        return total
    def polarization_intensity(x, y, z):
        total = (self.target.density_model(x, y, z)
                 * np.sqrt(np.abs(self.target.polarization_model(x, y, z))))
        return total
    # Integrate over the density function.
    integrated_intensity, int_error = _Backend.cli.cloud_line_integral(
        total_intensity, self.target.cloud_model,
        sightline_center, box_width,
        view_line_deltas=sightline_slopes,
        n_guesses=self.target._zeros_guess_count,
        integral_method=integral_method)
    # Also find out the total polarized intensity.
    polarized_integrated_intensity, pol_error = \
        _Backend.cli.cloud_line_integral(
            polarization_intensity, self.target.cloud_model,
            sightline_center, box_width,
            view_line_deltas=sightline_slopes,
            n_guesses=self.target._zeros_guess_count,
            integral_method=integral_method)
    # Error propagates in quadrature
    error = np.hypot(int_error, pol_error)
    # Return
    return integrated_intensity, polarized_integrated_intensity, error
def _compute_integrated_magnetic_field(self, sightline):
    """Computes total magnetic field vectors over a sightline.

    Given a sightline independent of the primary one, compute the
    integrated values of the magnetic field vectors. The values given
    is of little importance because of their computation of an improper
    summation, but the angles are most important. Nonetheless, magnitude
    is preserved.

    Parameters
    ----------
    sightline : :py:class:`Sightline` object
        The sightline through which the magnetic fields will be calculated
        through.

    Returns
    -------
    Bfield_x_integrated : float
        The total value of all x-axial magnetic field vectors added
        together through the sightline and object cloud.
    Bfield_y_integrated : float
        The total value of all y-axial magnetic field vectors added
        together through the sightline and object cloud.
    Bfield_z_integrated : float
        The total value of all z-axial magnetic field vectors added
        together through the sightline and object cloud.
    errors : ndarray
        A collection of error values, parallel to the float value
        collection above.
    """
    # Basic type checking.
    if (not isinstance(sightline, Sightline)):
        raise TypeError('The sightline must be a sightline object.'
                        ' --Kyubey')
    # Extract information about the target. The coefficient is rather
    # arbitrary.
    box_width = 10 * self.offset
    # If the Protostar model contains an interpolation table instead of
    # a normal function. Assume the usage of a Simpson's integration.
    if (self.target._interpolated_tables):
        integral_method = 'simpsons'
    else:
        integral_method = 'scipy'
    # Define custom functions such that integrating over a vector function
    # is instead an integration over the three independent dimensions.
    def target_cloud_Bfield_x(x, y, z):
        return self.target.magnetic_field(x, y, z)[0]
    def target_cloud_Bfield_y(x, y, z):
        return self.target.magnetic_field(x, y, z)[1]
    def target_cloud_Bfield_z(x, y, z):
        return self.target.magnetic_field(x, y, z)[2]
    # Extract sightline information
    sightline_center, sightline_slopes = sightline.sightline_parameters()
    # Begin computation: one line integral per axial component.
    Bfield_x_integrated, error_x = _Backend.cli.cloud_line_integral(
        target_cloud_Bfield_x, self.target.cloud_model,
        sightline_center, box_width,
        view_line_deltas=sightline_slopes,
        n_guesses=self.target._zeros_guess_count,
        integral_method=integral_method)
    Bfield_y_integrated, error_y = _Backend.cli.cloud_line_integral(
        target_cloud_Bfield_y, self.target.cloud_model,
        sightline_center, box_width,
        view_line_deltas=sightline_slopes,
        n_guesses=self.target._zeros_guess_count,
        integral_method=integral_method)
    Bfield_z_integrated, error_z = _Backend.cli.cloud_line_integral(
        target_cloud_Bfield_z, self.target.cloud_model,
        sightline_center, box_width,
        view_line_deltas=sightline_slopes,
        n_guesses=self.target._zeros_guess_count,
        integral_method=integral_method)
    error = np.array([error_x, error_y, error_z], dtype=float)
    return (Bfield_x_integrated,
            Bfield_y_integrated,
            Bfield_z_integrated,
            error)
def _Stoke_parameters(self, n_axial_samples):
    """Return the Stokes parameters for a mesh of sightlines.

    This function computes an entire slew of Stokes parameters by
    generating a regular grid of sightlines (built with ``np.linspace``
    and ``np.meshgrid`` — not random) within the field of view of the
    primary sightline. This function is the precursor for all of the
    contour plots.

    Parameters
    ----------
    n_axial_samples : int
        The number of points along one RA or DEC axis to be sampled. The
        resulting sample is a mesh of n**2 sightlines between the bounds.

    Returns
    -------
    stokes_parameters : tuple
        The four Stokes parameters (I, Q, U, V) over all of the
        sightlines.
    ra_dec_array : tuple
        The RA and DEC value arrays of all of the sightlines.
    sightline_list : list
        All of the sightline objects, just in case for whatever need.
    """
    # Type checking.
    n_axial_samples = Robust.valid.validate_int_value(n_axial_samples,
                                                      greater_than=0)
    # Work in radians for the sightline's center.
    target_ra, target_dec = self.sightline._radianize_coordinates()
    # Create a large list of sightlines.
    ra_range = np.linspace(target_ra - self.offset,
                           target_ra + self.offset,
                           n_axial_samples)
    dec_range = np.linspace(target_dec - self.offset,
                            target_dec + self.offset,
                            n_axial_samples)
    # Establish a mesh grid, then flatten to 1D arrays of points.
    ra_mesh, dec_mesh = np.meshgrid(ra_range, dec_range)
    ra_array = np.ravel(ra_mesh)
    dec_array = np.ravel(dec_mesh)
    # Compile the sightlines in a list.
    sightline_list = []
    for radex, decdex in zip(ra_array, dec_array):
        temp_skycoord = ap_coord.SkyCoord(radex, decdex,
                                          frame='icrs', unit='rad')
        sightline_list.append(Sightline(None, None, temp_skycoord))
    # It is best if it is not vectored like other numpy operations,
    # because the per-sightline work deals with specific classes.
    intensity_array = []
    polarized_intensity = []
    Bfield_x_array = []
    Bfield_y_array = []
    Bfield_z_array = []
    error_array = []
    for sightlinedex in sightline_list:
        temp_intensity, temp_polarized_intensity, intensity_error = \
            self._compute_integrated_intensity(sightlinedex)
        Bfield_x, Bfield_y, Bfield_z, Bfield_error = \
            self._compute_integrated_magnetic_field(sightlinedex)
        # Append.
        intensity_array.append(temp_intensity)
        polarized_intensity.append(temp_polarized_intensity)
        Bfield_x_array.append(Bfield_x)
        Bfield_y_array.append(Bfield_y)
        Bfield_z_array.append(Bfield_z)
        # Combine errors in quadrature.
        error_array.append(np.hypot(intensity_error, Bfield_error))
    # Vectorize.
    intensity_array = np.array(intensity_array, dtype=float)
    polarized_intensity = np.array(polarized_intensity, dtype=float)
    Bfield_x_array = np.array(Bfield_x_array, dtype=float)
    Bfield_y_array = np.array(Bfield_y_array, dtype=float)
    Bfield_z_array = np.array(Bfield_z_array, dtype=float)
    # NOTE(review): error_array is computed above but never returned or
    # used below — confirm whether it should be part of the return value.
    error_array = np.array(error_array, dtype=float)
    # The value of Bfield_x_array is of non-issue because of the desire
    # for the Stokes parameters and the orientation of the coordinate
    # system.
    del Bfield_x_array, Bfield_x
    # Convert the magnetic fields to electric fields. Because the strength
    # of the magnetic field is independent of the strength of the E field
    # through the virtue of the reflecting dust grains, scale by intensity.
    Efield_y_array_norm, Efield_z_array_norm = \
        _Backend.efp.magnetic_to_electric(Bfield_y_array, Bfield_z_array,
                                          normalize=True)
    Efield_y_array = Efield_y_array_norm * intensity_array
    Efield_z_array = Efield_z_array_norm * intensity_array
    # Polarized light.
    Efield_y_array_polar = Efield_y_array_norm * polarized_intensity
    Efield_z_array_polar = Efield_z_array_norm * polarized_intensity
    # Get all of the Stokes parameters.
    I, Q, U, V = \
        _Backend.efp.Stokes_parameters_from_field(
            Efield_y_array_polar, Efield_z_array_polar)
    # Total intensity is actually from the Efield of the total light, not
    # just polarized light, so the I from above is overwritten here.
    I = Efield_y_array**2 + Efield_z_array**2
    # Return all of the parameters as this is a hidden function. The front
    # end contour functions take care of producing only the one the user
    # wants. Also, return back the sightline locations.
    stokes_parameters = (I, Q, U, V)
    ra_dec_array = (ra_array, dec_array)
    return stokes_parameters, ra_dec_array, sightline_list
|
# Generated by Django 2.0.3 on 2019-02-15 08:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Slice and Tutor models.

    Both models use a parent-link OneToOneField to core.BaseModel
    (multi-table inheritance), so their tables depend on the core app's
    initial migration.
    """

    # First migration for this app.
    initial = True

    # core.BaseModel's table must exist before these child tables.
    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        # Slice: titled image content — presumably for a homepage
        # slider/carousel (TODO confirm against templates).
        migrations.CreateModel(
            name='Slice',
            fields=[
                ('basemodel_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.BaseModel')),
                ('title', models.CharField(max_length=100, verbose_name='Title')),
                ('description', models.CharField(max_length=200, verbose_name='Description')),
                ('image', models.ImageField(upload_to='img/slice', verbose_name='Image')),
            ],
            bases=('core.basemodel',),
        ),
        # Tutor: instructor profile with optional image and social links.
        migrations.CreateModel(
            name='Tutor',
            fields=[
                ('basemodel_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.BaseModel')),
                ('name', models.CharField(max_length=50, verbose_name='Name')),
                ('rol', models.CharField(max_length=50, verbose_name='Occupation')),
                ('description', models.CharField(default='', max_length=100, verbose_name='Quick Description')),
                ('image', models.ImageField(blank=True, null=True, upload_to='img/tutors', verbose_name='Profile Image')),
                ('facebook', models.URLField(blank=True, null=True, verbose_name='Facebook Link')),
                ('twitter', models.URLField(blank=True, null=True, verbose_name='Twitter Link')),
                ('instagram', models.URLField(blank=True, null=True, verbose_name='Instagram Link')),
            ],
            bases=('core.basemodel',),
        ),
    ]
|
def main():
    """Prompt for money received and price, then print the change as a
    breakdown of banknote denominations (largest first)."""
    money_received = int(input("Money received: "))
    price = int(input("Price: "))
    print(make_change(money_received - price))


def make_change(change):
    """Return a dict mapping each denomination to the number of notes
    needed to make up `change`, using a greedy largest-first strategy.

    Any remainder smaller than the smallest denomination (1000) is
    silently dropped, matching the original behavior.

    Raises:
        ValueError: if `change` is negative (not enough money received).
    """
    if change < 0:
        raise ValueError("money received is less than the price")
    result = {100000: 0, 50000: 0, 20000: 0, 10000: 0, 5000: 0, 2000: 0, 1000: 0}
    for denomination in result:
        # divmod replaces the original truncating int(change / key): it
        # avoids float division entirely and yields the count and the
        # remaining amount in one step.
        result[denomination], change = divmod(change, denomination)
    return result


if __name__ == '__main__':
    main()
|
import sys
import numpy as np
# Report the interpreter and NumPy versions (useful for bug reports).
for label, value in (("Python version: ", sys.version),
                     ("Numpy version: ", np.__version__)):
    print(label + value)
|
'''
bilibili spider init file
'''
from .SpiderModule import bilibili_spider
from .ProcessRawModule import process_run_main
from .MasModule import mas_get_html
|
# -*- coding:utf-8; -*-
class Solution:
    def removeDuplicates(self, nums):
        """Remove duplicates from a sorted list in place.

        Keeps the first occurrence of each value in nums[:k] and returns
        k, the length of the deduplicated prefix.

        Args:
            nums: A sorted list of comparable values; mutated in place.

        Returns:
            The number of unique elements (0 for an empty list).
        """
        # Fix: the original returned 1 for an empty list because it
        # unconditionally returned lastUniq + 1.
        if not nums:
            return 0
        last_unique = 0  # index of the last element of the unique prefix
        for value in nums[1:]:
            if value != nums[last_unique]:
                last_unique += 1
                nums[last_unique] = value
        return last_unique + 1
|
import time
import random
import sys
# Simulate one roll of a six-sided die ("Wuerfel"), pausing briefly
# before revealing the result ("ergebnis").
Wuerfel = [str(side) for side in range(1, 7)]
time.sleep(0.5)
ergebnis = random.choice(Wuerfel)
print(ergebnis)
|
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for serializing TensorFlow computations."""
import os
import os.path
import shutil
import tempfile
import types
from typing import Dict, Optional, Set, MutableSequence
import zipfile
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import serialization_utils
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl.context_stack import context_stack_base
from tensorflow_federated.python.core.impl.tensorflow_context import tensorflow_computation_context
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.impl.types import type_serialization
from tensorflow_federated.python.core.impl.utils import function_utils
from tensorflow_federated.python.core.impl.utils import tensorflow_utils
from tensorflow_federated.python.tensorflow_libs import variable_utils
class SerializationError(Exception):
  """Raised when serializing or deserializing a value fails."""
def finalize_binding(binding, tensor_info_map):
  """Mutates binding by filling in actual tensor names.

  Recursively walks the binding message, replacing each placeholder
  `tensor_name` with the final name looked up in `tensor_info_map`.

  Args:
    binding: A `pb.Binding` or one of its submessages.
    tensor_info_map: A dict mapping the placeholder `tensor_name`s found in
      `binding` to final tensor names.
  """
  if not binding:
    # An empty binding is only valid if there is nothing to fill in.
    if tensor_info_map:
      raise ValueError('Empty binding, but non-empty tensor_info_map {}'.format(
          tensor_info_map))
    return
  if isinstance(binding, pb.TensorFlow.Binding):
    # Recurse into whichever oneof variant is actually set.
    finalize_binding(getattr(binding, binding.WhichOneof('binding')),
                     tensor_info_map)
  elif isinstance(binding, pb.TensorFlow.TensorBinding):
    placeholder_name = binding.tensor_name
    if placeholder_name not in tensor_info_map:
      raise ValueError(
          'Did not find tensor_name {} in provided tensor_info_map with keys {}'
          .format(placeholder_name, list(tensor_info_map.keys())))
    binding.tensor_name = tensor_info_map[placeholder_name].name
  elif isinstance(binding, pb.TensorFlow.StructBinding):
    for element_binding in binding.element:
      finalize_binding(element_binding, tensor_info_map)
  else:
    raise ValueError('Unsupported binding type {}'.format(
        py_typecheck.type_string(type(binding))))
def serialize_tf2_as_tf_computation(target, parameter_type, unpack=None):
  """Serializes the 'target' as a TF computation with a given parameter type.

  Args:
    target: The entity to convert into and serialize as a TF computation. This
      can currently only be a Python function or `tf.function`, with arguments
      matching the 'parameter_type'.
    parameter_type: The parameter type specification if the target accepts a
      parameter, or `None` if the target doesn't declare any parameters. Either
      an instance of `types.Type`, or something that's convertible to it by
      `types.to_type()`.
    unpack: Whether to always unpack the parameter_type. Necessary for support
      of polymorphic tf2_computations.

  Returns:
    A tuple `(computation, annotated_type)`: the constructed `pb.Computation`
    instance with the `pb.TensorFlow` variant set, and the corresponding
    `computation_types.FunctionType`.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the signature of the target is not compatible with the given
      parameter type.
  """
  py_typecheck.check_callable(target)
  parameter_type = computation_types.to_type(parameter_type)
  signature = function_utils.get_signature(target)
  # NOTE(review): this fires when the target HAS parameters but no
  # parameter_type was supplied, yet the message reads as the opposite —
  # confirm the intended wording.
  if signature.parameters and parameter_type is None:
    raise ValueError(
        'Expected the target to declare no parameters, found {!r}.'.format(
            signature.parameters))

  # In the codepath for TF V1 based serialization (tff.tf_computation),
  # we get the "wrapped" function to serialize. Here, target is the
  # raw function to be wrapped; however, we still need to know if
  # the parameter_type should be unpacked into multiple args and kwargs
  # in order to construct the TensorSpecs to be passed in the call
  # to get_concrete_fn below.
  unpack = function_utils.infer_unpack_needed(target, parameter_type, unpack)
  arg_typespecs, kwarg_typespecs, parameter_binding = (
      tensorflow_utils.get_tf_typespec_and_binding(
          parameter_type,
          arg_names=list(signature.parameters.keys()),
          unpack=unpack))

  # Pseudo-global to be appended to once when target_poly below is traced.
  type_and_binding_slot = []

  # N.B. To serialize a tf.function or eager python code,
  # the return type must be a flat list, tuple, or dict. However, the
  # tff.tf_computation must be able to handle structured inputs and outputs.
  # Thus, we intercept the result of calling the original target fn, introspect
  # its structure to create a result_type and bindings, and then return a
  # flat dict output. It is this new "unpacked" tf.function that we will
  # serialize using tf.saved_model.save.
  #
  # TODO(b/117428091): The return type limitation is primarily a limitation of
  # SignatureDefs and therefore of the signatures argument to
  # tf.saved_model.save. tf.functions attached to objects and loaded back with
  # tf.saved_model.load can take/return nests; this might offer a better
  # approach to the one taken here.
  @tf.function
  def target_poly(*args, **kwargs):
    """Flattens `target`'s result and records its type/binding side channel."""
    result = target(*args, **kwargs)
    result_dict, result_type, result_binding = (
        tensorflow_utils.get_tf2_result_dict_and_binding(result))
    # The slot must still be empty: tracing happens exactly once below.
    assert not type_and_binding_slot
    # A "side channel" python output.
    type_and_binding_slot.append((result_type, result_binding))
    return result_dict

  # Triggers tracing so that type_and_binding_slot is filled.
  cc_fn = target_poly.get_concrete_function(*arg_typespecs, **kwarg_typespecs)
  assert len(type_and_binding_slot) == 1
  result_type, result_binding = type_and_binding_slot[0]

  # N.B. Note that cc_fn does *not* accept the same args and kwargs as the
  # Python target_poly; instead, it must be called with **kwargs based on the
  # unique names embedded in the TensorSpecs inside arg_typespecs and
  # kwarg_typespecs. The (preliminary) parameter_binding tracks the mapping
  # between these tensor names and the components of the (possibly nested) TFF
  # input type. When cc_fn is serialized, concrete tensors for each input are
  # introduced, and the call finalize_binding(parameter_binding,
  # sigs['serving_default'].inputs) updates the bindings to reference these
  # concrete tensors.

  # Associate vars with unique names and explicitly attach to the Checkpoint:
  var_dict = {
      'var{:02d}'.format(i): v for i, v in enumerate(cc_fn.graph.variables)
  }
  saveable = tf.train.Checkpoint(fn=target_poly, **var_dict)

  try:
    # TODO(b/122081673): All we really need is the meta graph def, we could
    # probably just load that directly, e.g., using parse_saved_model from
    # tensorflow/python/saved_model/loader_impl.py, but I'm not sure we want to
    # depend on that presumably non-public symbol. Perhaps TF can expose a way
    # to just get the MetaGraphDef directly without saving to a tempfile? This
    # looks like a small change to v2.saved_model.save().
    outdir = tempfile.mkdtemp('savedmodel')
    tf.saved_model.save(saveable, outdir, signatures=cc_fn)

    graph = tf.Graph()
    with tf.compat.v1.Session(graph=graph) as sess:
      mgd = tf.compat.v1.saved_model.load(
          sess, tags=[tf.saved_model.SERVING], export_dir=outdir)
  finally:
    # NOTE(review): if mkdtemp itself raises, `outdir` is unbound here and
    # this cleanup raises NameError — confirm whether that is acceptable.
    shutil.rmtree(outdir)
  sigs = mgd.signature_def

  # TODO(b/123102455): Figure out how to support the init_op. The meta graph def
  # contains sigs['__saved_model_init_op'].outputs['__saved_model_init_op']. It
  # probably won't do what we want, because it will want to read from
  # Checkpoints, not just run Variable initializerse (?). The right solution may
  # be to grab the target_poly.get_initialization_function(), and save a sig for
  # that.

  # Now, traverse the signature from the MetaGraphDef to find
  # find the actual tensor names and write them into the bindings.
  finalize_binding(parameter_binding, sigs['serving_default'].inputs)
  finalize_binding(result_binding, sigs['serving_default'].outputs)

  annotated_type = computation_types.FunctionType(parameter_type, result_type)

  return pb.Computation(
      type=pb.Type(
          function=pb.FunctionType(
              parameter=type_serialization.serialize_type(parameter_type),
              result=type_serialization.serialize_type(result_type))),
      tensorflow=pb.TensorFlow(
          graph_def=serialization_utils.pack_graph_def(mgd.graph_def),
          parameter=parameter_binding,
          result=result_binding)), annotated_type
def serialize_py_fn_as_tf_computation(target, parameter_type, context_stack):
  """Serializes the 'target' as a TF computation with a given parameter type.

  See also `serialize_tf2_as_tf_computation` for TensorFlow 2
  serialization.

  Args:
    target: The entity to convert into and serialize as a TF computation. This
      can currently only be a Python function. In the future, we will add here
      support for serializing the various kinds of non-eager and eager
      functions, and eventually aim at full support for and compliance with TF
      2.0. This function is currently required to declare either zero parameters
      if `parameter_type` is `None`, or exactly one parameter if it's not
      `None`. The nested structure of this parameter must correspond to the
      structure of the 'parameter_type'. In the future, we may support targets
      with multiple args/keyword args (to be documented in the API and
      referenced from here).
    parameter_type: The parameter type specification if the target accepts a
      parameter, or `None` if the target doesn't declare any parameters. Either
      an instance of `types.Type`, or something that's convertible to it by
      `types.to_type()`.
    context_stack: The context stack to use.

  Returns:
    A tuple of (`pb.Computation`, `tff.Type`), where the computation contains
    the instance with the `pb.TensorFlow` variant set, and the type is an
    instance of `tff.Type`, potentially including Python container annotations,
    for use by TensorFlow computation wrappers.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the signature of the target is not compatible with the given
      parameter type.
  """
  # TODO(b/113112108): Support a greater variety of target type signatures,
  # with keyword args or multiple args corresponding to elements of a tuple.
  # Document all accepted forms with examples in the API, and point to there
  # from here.
  py_typecheck.check_type(target, types.FunctionType)
  py_typecheck.check_type(context_stack, context_stack_base.ContextStack)
  parameter_type = computation_types.to_type(parameter_type)
  signature = function_utils.get_signature(target)

  with tf.Graph().as_default() as graph:
    if parameter_type is not None:
      # Exactly one Python parameter carries the entire (possibly nested)
      # TFF parameter type.
      if len(signature.parameters) != 1:
        raise ValueError(
            'Expected the target to declare exactly one parameter, found {!r}.'
            .format(signature.parameters))
      parameter_name = next(iter(signature.parameters))
      # Stamp placeholder input tensors for the parameter into the graph.
      parameter_value, parameter_binding = tensorflow_utils.stamp_parameter_in_graph(
          parameter_name, parameter_type, graph)
    else:
      if signature.parameters:
        raise ValueError(
            'Expected the target to declare no parameters, found {!r}.'.format(
                signature.parameters))
      parameter_value = None
      parameter_binding = None
    context = tensorflow_computation_context.TensorFlowComputationContext(graph)
    with context_stack.install(context):
      # Track every variable created while tracing `target` so an explicit
      # init op can be built for them below.
      with variable_utils.record_variable_creation_scope() as all_variables:
        if parameter_value is not None:
          result = target(parameter_value)
        else:
          result = target()
      initializer_ops = []
      if all_variables:
        # Use a readable but not-too-long name for the init_op.
        name = 'init_op_for_' + '_'.join(
            [v.name.replace(':0', '') for v in all_variables])
        if len(name) > 50:
          name = 'init_op_for_{}_variables'.format(len(all_variables))
        initializer_ops.append(
            tf.compat.v1.initializers.variables(all_variables, name=name))
      initializer_ops.extend(
          tf.compat.v1.get_collection(
              tf.compat.v1.GraphKeys.TABLE_INITIALIZERS))
      if initializer_ops:
        # Before running the main new init op, run any initializers for sub-
        # computations from context.init_ops. Variables from import_graph_def
        # will not make it into the global collections, and so will not be
        # initialized without this code path.
        with tf.compat.v1.control_dependencies(context.init_ops):
          init_op_name = tf.group(
              *initializer_ops, name='grouped_initializers').name
      elif context.init_ops:
        init_op_name = tf.group(
            *context.init_ops, name='subcomputation_init_ops').name
      else:
        init_op_name = None
    result_type, result_binding = tensorflow_utils.capture_result_from_graph(
        result, graph)

  type_signature = computation_types.FunctionType(parameter_type, result_type)

  # WARNING: we do not really want to be modifying the graph here if we can
  # avoid it. This is purely to work around performance issues uncovered with
  # the non-standard usage of Tensorflow and have been discussed with the
  # Tensorflow core team before being added.
  clean_graph_def = _clean_graph_def(graph.as_graph_def())
  tensorflow = pb.TensorFlow(
      graph_def=serialization_utils.pack_graph_def(clean_graph_def),
      parameter=parameter_binding,
      result=result_binding,
      initialize_op=init_op_name)
  return pb.Computation(
      type=type_serialization.serialize_type(type_signature),
      tensorflow=tensorflow), type_signature
def _clean_graph_def(graph_def: tf.compat.v1.GraphDef) -> tf.compat.v1.GraphDef:
  """Apply TFF-specific, semantics-preserving edits to a GraphDef.

  WARNING: This method must _NOT_ make any semantic changes (those that would
  change the results of the computation). The edits exist solely to work
  around performance issues uncovered with the non-standard usage of
  Tensorflow, and have been discussed with the Tensorflow core team before
  being added.

  Args:
    graph_def: the proto message to modify.

  Returns:
    The same GraphDef, altered in place for performance, with no semantic
    modifications.
  """
  # TODO(b/153565654): remove this workaround once there is a way to prevent
  # the OptimizeDataset ops from being added when serializing FunctionDef that
  # received a tf.data.Dataset argument.
  _remove_optimize_dataset_ops(graph_def)
  return graph_def
# TODO(b/153565654): cleanup this workaround method when no longer needed.
def _remove_optimize_dataset_ops(graph_def: tf.compat.v1.GraphDef):
"""Removes `OptimizeDataset` and `ModelDataset` ops from the graph.
TensorFlow Federated creates and tears down datasets frequently (one for each
user); which is contrary to the TF expected usage of a Dataset where it is
setup once and used throughout a long training process.
In the TFF execution stack this leads to performance degradation where a lot
of time is spent optimizing a dataset that will soon be thrown away. The time
spent optimizing can even be as long as it takes to train on the dataset. For
that reason TFF turns off this optimization.
Luckily, it appears that generally `ModelDataset` and `OptimizeDataset` have a
fairly straightforward pattern in graphs (though a fragile assumption); they
have so far always appeared as:
dataset -> OptimizeDataset -> ModelDataset -> (DatasetReduce ...)
Each node is the first input to the next node. The following function simply
cuts out the middle nodes and connect the first input into OptimizeDataset
as the input to replace ModelDataset into the last op in the chain.
Args:
graph_def: the proto message to mutate in place.
"""
optimize_dataset_ops = ['OptimizeDataset', 'OptimizeDatasetV2']
ops_to_remove = optimize_dataset_ops + ['ModelDataset']
def is_control_dep(tensor_name: str) -> bool:
return tensor_name.startswith('^')
def normalized_tensor_name(tensor_name: str) -> str:
if is_control_dep(tensor_name):
return tensor_name[1:]
return tensor_name.split(':', maxsplit=2)[0]
def clean_input_tensor(
tensor_name: str,
names_to_nodes: Dict[str, tf.compat.v1.NodeDef],
input_args: Set[str],
) -> Optional[str]:
"""Rewire an input tensor that is output by a removed node."""
node_name = normalized_tensor_name(tensor_name)
if is_control_dep(tensor_name):
# Simply delete control deps on removed nodes, otherwise pass through.
input_node = names_to_nodes[node_name]
if input_node.op in ops_to_remove:
return None
return tensor_name
node = names_to_nodes.get(node_name)
if node is None:
if tensor_name in input_args:
return node_name
else:
raise ValueError('cannot handle input {n} ({nn})'.format(
n=tensor_name, nn=node_name))
if node.op not in ops_to_remove:
return tensor_name
if node.op in optimize_dataset_ops:
# The dataset is the first input to OptimizeDataset, so return to replace
# the dependency on OptimizeDataset.
return node.input[0]
elif node.op == 'ModelDataset':
# ModelDataset's first input is expected to be OptimizeDataset, we can
# walk up input chain and find the input to the OptimizeDataset and return
# that instead.
input_node_name = normalized_tensor_name(node.input[0])
input_node = names_to_nodes.get(input_node_name)
if input_node is None or input_node.op not in optimize_dataset_ops:
raise ValueError(
'Input to ModelDataset node was {o}, expected OptimizeDataset or '
'OptimizeDatasetV2. Unknown graph structure, aborting.'.format(
o=input_node.op if input_node is not None else 'None'))
return input_node.input[0]
else:
raise ValueError('Encoutered node [{n}] which is an op to remove, but '
'is not handled properly.'.format(n=node))
def filter_nodes(node_defs: MutableSequence[tf.compat.v1.NodeDef], args):
nodes_to_keep = []
names_to_nodes = {}
for node in node_defs:
names_to_nodes[node.name] = node
if node.op not in ops_to_remove:
nodes_to_keep.append(node)
func_arg_names = {arg.name for arg in args}
for node in nodes_to_keep:
clean_inputs = []
for input_name in node.input:
clean_input = clean_input_tensor(input_name, names_to_nodes,
func_arg_names)
if clean_input is not None:
clean_inputs.append(clean_input)
del node.input[:]
node.input.extend(clean_inputs)
del node_defs[:]
node_defs.extend(nodes_to_keep)
filter_nodes(graph_def.node, args=[])
for function in graph_def.library.function:
filter_nodes(function.node_def, args=function.signature.input_arg)
# The maximum size allowed for serialized sequence values. Sequences that
# serialize to values larger than this will result in errors being raised.
# This likely occurs when the sequence is dependent on, and thus pulling in,
# many variables from the graph.
DEFAULT_MAX_SERIALIZED_SEQUENCE_SIZE_BYTES = 20 * (1024**2)  # 20 MB
# TODO(b/137880330): there is likely opportunity here to share implementation
# with the serialization happening in
# `tensorflow_serialization.serialize_tf2_as_tf_computation()`. It would be good
# to sync with TF team about options for ensuring graph-only (variable-less)
# serializations.
def serialize_dataset(
    dataset,
    max_serialized_size_bytes=DEFAULT_MAX_SERIALIZED_SEQUENCE_SIZE_BYTES):
  """Serializes a `tf.data.Dataset` value into a `bytes` object.

  The dataset is exported via `tf.saved_model.save` into a temporary
  directory, whose contents are then packed into a single zip archive and
  returned as bytes.

  Args:
    dataset: A `tf.data.Dataset`.
    max_serialized_size_bytes: An `int` size in bytes designating the threshold
      on when to raise an error if the resulting serialization is too big.

  Returns:
    A `bytes` object that can be sent to
    `tensorflow_serialization.deserialize_dataset` to recover the original
    `tf.data.Dataset`.

  Raises:
    SerializationError: if there was an error in TensorFlow during
      serialization.
    ValueError: if the serialized size exceeds `max_serialized_size_bytes`.
  """
  py_typecheck.check_type(dataset,
                          type_conversions.TF_DATASET_REPRESENTATION_TYPES)
  # Wrap the dataset in a module exposing a zero-arg accessor;
  # deserialize_dataset recovers the dataset by calling `dataset_fn`.
  module = tf.Module()
  module.dataset = dataset
  module.dataset_fn = tf.function(lambda: module.dataset, input_signature=())

  temp_dir = tempfile.mkdtemp('dataset')
  # mkstemp returns an open file descriptor; close it right away since only
  # the path is used below.
  fd, temp_zip = tempfile.mkstemp('zip')
  os.close(fd)
  try:
    tf.saved_model.save(module, temp_dir, signatures={})
    with zipfile.ZipFile(temp_zip, 'w') as z:
      for topdir, _, filenames in tf.io.gfile.walk(temp_dir):
        # Strip the temp_dir prefix so archive member paths are relative.
        dest_dir = topdir[len(temp_dir):]
        for filename in filenames:
          z.write(
              os.path.join(topdir, filename), os.path.join(dest_dir, filename))
    with open(temp_zip, 'rb') as z:
      zip_bytes = z.read()
  except Exception as e:  # pylint: disable=broad-except
    raise SerializationError(
        'Error serializing tff.Sequence value. Inner error: {!s}'.format(
            e)) from e
  finally:
    # Both temp locations are removed whether or not serialization worked.
    tf.io.gfile.rmtree(temp_dir)
    tf.io.gfile.remove(temp_zip)
  # Size check happens after cleanup so temp files never outlive this call.
  if len(zip_bytes) > max_serialized_size_bytes:
    raise ValueError('Serialized size of Dataset ({:d} bytes) exceeds maximum '
                     'allowed ({:d} bytes)'.format(
                         len(zip_bytes), max_serialized_size_bytes))
  return zip_bytes
def deserialize_dataset(serialized_bytes):
  """Deserializes a `bytes` object to a `tf.data.Dataset`.

  Inverse of `serialize_dataset`: the bytes are written to a temporary zip
  file, extracted as a SavedModel, and the dataset is recovered by calling
  the saved module's `dataset_fn`.

  Args:
    serialized_bytes: `bytes` object produced by
      `tensorflow_serialization.serialize_dataset`

  Returns:
    A `tf.data.Dataset` instance.

  Raises:
    SerializationError: if there was an error in TensorFlow during
      serialization.
  """
  py_typecheck.check_type(serialized_bytes, bytes)
  temp_dir = tempfile.mkdtemp('dataset')
  # mkstemp returns an open file descriptor; close it right away since only
  # the path is used below.
  fd, temp_zip = tempfile.mkstemp('zip')
  os.close(fd)
  try:
    with open(temp_zip, 'wb') as f:
      f.write(serialized_bytes)
    with zipfile.ZipFile(temp_zip, 'r') as z:
      z.extractall(path=temp_dir)
    loaded = tf.saved_model.load(temp_dir)
    # TODO(b/156302055): Follow up here when bug is resolved, either remove
    # if this function call stops failing by default, or leave if this is
    # working as intended.
    with tf.device('cpu'):
      ds = loaded.dataset_fn()
  except Exception as e:  # pylint: disable=broad-except
    raise SerializationError(
        'Error deserializing tff.Sequence value. Inner error: {!s}'.format(
            e)) from e
  finally:
    # Both temp locations are removed whether or not deserialization worked.
    tf.io.gfile.rmtree(temp_dir)
    tf.io.gfile.remove(temp_zip)
  return ds
|
##
# See the file COPYRIGHT for copyright information.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Incident Management System data model JSON serialization/deserialization
"""
from datetime import datetime as DateTime
from enum import Enum
from typing import Any, Callable, Iterable, Mapping, Union, cast
from cattr import Converter
from twisted.logger import Logger
from ims.ext.frozendict import FrozenDict
from ims.ext.json import dateTimeAsRFC3339Text, rfc3339TextAsDateTime
__all__ = ()


log = Logger()


# Alias for any value representable as JSON.
JSON = Union[Mapping[str, Any], Iterable, int, str, float, bool, None]
class JSONCodecError(Exception):
    """
    Raised when JSON data cannot be serialized or deserialized.
    """
converter = Converter()

# Module-level aliases over the shared cattr converter: serialization is
# cattr "unstructure" (model object -> plain JSON), deserialization is
# cattr "structure" (JSON -> model object), and the register* callables
# attach per-type hooks to this converter instance.
jsonSerialize: Callable[[Any], JSON] = converter.unstructure
jsonDeserialize = converter.structure
registerSerializer = converter.register_unstructure_hook
registerDeserializer = converter.register_structure_hook
# DateTime: serialized as RFC 3339 text in both directions.
registerSerializer(DateTime, dateTimeAsRFC3339Text)


def deserializeDateTime(obj: str, cl: type[DateTime]) -> DateTime:
    """
    Deserialize RFC 3339 timestamp text into a DateTime.
    """
    assert cl is DateTime, (cl, obj)
    return rfc3339TextAsDateTime(obj)


registerDeserializer(DateTime, deserializeDateTime)
# Tuples and sets should serialize like lists
def serializeIterable(iterable: Iterable[Any]) -> list[JSON]:
    """
    Serialize each item of an iterable into a JSON list.
    """
    return list(map(jsonSerialize, iterable))


registerSerializer(frozenset, serializeIterable)
registerSerializer(set, serializeIterable)
registerSerializer(tuple, serializeIterable)
# FrozenDict: serialized as a plain dict in both directions.
def serializeFrozenDict(frozenDict: FrozenDict[str, Any]) -> JSON:
    """
    Serialize a FrozenDict as a plain dict.
    """
    return jsonSerialize(dict(frozenDict))


registerSerializer(FrozenDict, serializeFrozenDict)


def deserializeFrozenDict(
    obj: Mapping[str, JSON], cl: type[FrozenDict[str, JSON]]
) -> FrozenDict[str, JSON]:
    """
    Deserialize a mapping into a FrozenDict.
    """
    assert cl is FrozenDict, (cl, obj)
    return FrozenDict.fromMapping(obj)


# Fix: this hook was defined but never registered (compare the DateTime
# hooks earlier in this module), so structuring a FrozenDict fell through
# to cattr's default behavior.
registerDeserializer(FrozenDict, deserializeFrozenDict)
# Public API
def jsonObjectFromModelObject(model: Any) -> JSON:
    """
    Convert a model object into an equivalent JSON object.
    """
    return jsonSerialize(model)
def modelObjectFromJSONObject(json: JSON, modelClass: type) -> Any:
    """Deserialize a JSON object into an instance of ``modelClass``.

    Raises JSONCodecError when the JSON is missing required keys.
    """
    try:
        return jsonDeserialize(json, modelClass)
    except KeyError as e:
        # Chain the KeyError so the offending key survives in the traceback.
        raise JSONCodecError(
            f"Invalid JSON for {modelClass.__name__}: {json}"
        ) from e
# Utilities
def deserialize(
    obj: dict[str, Any],
    cls: type[Any],
    typeEnum: type[Enum],
    keyEnum: type[Enum],
) -> Any:
    """Deserialize the JSON mapping ``obj`` into an instance of ``cls``.

    ``keyEnum`` members name the constructor arguments (member name) and the
    JSON keys they come from (member value); ``typeEnum`` maps each member
    name to the Python type that value should be structured as.
    """
    def deserializeKey(key: Enum) -> Any:
        # NOTE: this local ``cls`` deliberately shadows the outer parameter;
        # it holds the per-key target type, not the class being built.
        try:
            cls = getattr(typeEnum, key.name).value
        except AttributeError:
            raise AttributeError(
                "No attribute {attribute!r} in type enum {enum!r}".format(
                    attribute=key.name, enum=typeEnum
                )
            )
        try:
            # Keys absent from ``obj`` deserialize from None.
            return jsonDeserialize(obj.get(key.value, None), cls)
        except Exception:
            # Log context before re-raising so failures are diagnosable.
            log.error(
                "Unable to deserialize {key} as {cls} from {json}",
                key=key,
                cls=cls,
                json=obj,
            )
            raise
    # Build the target object with one deserialized value per key enum member.
    return cls(
        **{
            key.name: deserializeKey(key)
            for key in cast(Iterable[Enum], keyEnum)
        }
    )
|
import random
import os
import csv
from itertools import product
from pytest import fail
from tqdm import tqdm
from operator import itemgetter
from generate_entropy import *
from wordle import *
def generate_guess(previous_board, previous_guess, wordlist, turn, total_words, combs, guess, second_guess_scores, data):
    """Pick the next Wordle guess for the given turn.

    Returns (next_guess, filtered_words): the word to play and the mapping of
    words still consistent with the board feedback.
    """
    if turn == 0:
        # Opening guess is precomputed; the whole wordlist is still live.
        return guess, wordlist
    if turn == 1:
        # Second guess comes from a precomputed score table keyed by the
        # first board pattern (e.g. [0,1,2,0,1] -> "01201").
        board_name = "".join(str(cell) for cell in previous_board)
        guess_list = second_guess_scores[board_name]
        filtered_words = {entry["word"]: entry["word"] for entry in guess_list}
        return max(guess_list, key=itemgetter('score'))["word"], filtered_words
    # Later turns: narrow the candidates, then score each by entropy.
    candidates = filter_words(previous_board, previous_guess, wordlist)
    next_guess = {}
    for word in candidates:
        entropy = get_entropy(word, candidates, combs, total_words)
        next_guess[word] = calculate_score(entropy, data[word])
    filtered_words = {word: word for word in candidates}
    # Single max lookup instead of sorting the whole score dict.
    best_word = max(next_guess, key=next_guess.get)
    return best_word, filtered_words
def generate_guess_matrix(previous_board, previous_guess, turn, guesses, words_ordered, match_matrix, comb_map, total_words, guess, second_guess_scores, data):
    """Pick the next guess using the precomputed match matrix.

    Returns the chosen word, or None when no unseen candidate remains.
    """
    if turn == 0:
        return guess
    if turn == 1:
        board_name = "".join(str(cell) for cell in previous_board)
        guess_list = second_guess_scores[board_name]
        # guess_list is a list of {"word", "score"} dicts; pick the top
        # scorer (the original indexed the list with "word", a TypeError).
        return max(guess_list, key=itemgetter('score'))["word"]
    guess_index = words_ordered.index(previous_guess)
    guess_combs = match_matrix[guess_index]
    board_name = "".join(str(cell) for cell in previous_board)
    comb_number = comb_map[board_name]
    # Candidate words: those whose pattern vs the previous guess matches
    # the observed board.
    indices = np.where(guess_combs == comb_number)
    score_list = []
    if indices[0].size != 0:
        for i, row in enumerate(match_matrix[indices]):
            word_matches = row[indices]
            freq_map = dict(collections.Counter(word_matches))
            entropy = get_entropy_from_map(freq_map, total_words)
            candidate = words_ordered[int(indices[0][i])]
            score_list.append({
                "word": candidate,
                "score": calculate_score(entropy, data[candidate])
            })
    # Highest-scoring word not already guessed; None when nothing qualifies
    # (the original left final_guess unbound and raised NameError here).
    final_guess = None
    for entry in sorted(score_list, reverse=True, key=lambda d: d['score']):
        final_guess = entry['word']
        if final_guess not in guesses:
            break
    return final_guess
# word = random.choice(words)
# board = [0, 0, 0, 0, 0]
# print("Answer: ", word)
# guess = list(first_guess_list.keys())[0]
# for turn in range(6):
# previous_guess = guess
# guess = generate_guess(board, previous_guess, wordlist, turn)
# print(guess)
# board = check_guess(word, guess)
# print_board(board, outcomes)
# if board == [2, 2, 2, 2, 2]:
# print("Completed in ", turn + 1,"/ 6")
# break
def run_simulation(outcomes, wordlist, words, first_guess, second_guess_scores, total_words, combs, data, extended=False):
    """Play every word in ``words`` against the solver and collect results.

    Returns a record dict: per-game answer/guesses/share-text under "games"
    and aggregate totals under "stats". A failed game scores 0 and is
    excluded from the average.
    """
    # comb_map = {"".join([str(int) for int in list(comb)]): i for i, comb in enumerate(combs)}
    # match_matrix = np.load(os.path.join('datasets', 'match_matrix.npy'))
    # words_ordered = [word for word in wordlist]
    # words_ordered.sort()
    score_total = 0
    failed_games = 0
    total_games = len(words)
    # total_games = 10
    record = {}
    record["games"] = {}
    # Extended mode allows up to 30 turns instead of the standard 6.
    if extended:
        turns = 30
    else:
        turns = 6
    for index, word in enumerate(tqdm(words)):
        board = [0, 0, 0, 0, 0]
        guess = first_guess
        score = 1
        filtered_wordlist = wordlist
        game = {}
        game["answer"] = word
        game["guesses"] = []
        # Shareable summary header, in the style of Wordle's share text.
        game["share"] = "Wordle " + str(index) + " {}/6*\n\n"
        boards = ""
        for turn in range(turns):
            previous_guess = guess
            guess, filtered_wordlist = generate_guess(
                board, previous_guess, filtered_wordlist, turn, total_words, combs, guess, second_guess_scores, data)
            board = check_guess(word, guess)
            game["guesses"].append(guess)
            boards += print_board(board, outcomes)+"\n"
            if board == [2, 2, 2, 2, 2]:
                # Solved: break before score is incremented further.
                break
            score += 1
            if turn == turns-1:
                score = 0  # ran out of turns -> failed game
        if score == 0:
            failed_games += 1
            game["share"] = game["share"].format("X")
            game["score"] = "X"
        else:
            game["share"] = game["share"].format(score)
            game["score"] = score
        game["share"] += boards
        record["games"][index] = game
        score_total += score
        # print(score_total/(index +1 -failed_games))
    record["stats"] = {
        "total": total_games,
        "failed": failed_games,
        "average": score_total/(total_games-failed_games)
    }
    return record
|
'''OpenGL extension NV.texture_barrier
This module customises the behaviour of the
OpenGL.raw.GL.NV.texture_barrier to provide a more
Python-friendly API
Overview (from the spec)
This extension relaxes the restrictions on rendering to a currently
bound texture and provides a mechanism to avoid read-after-write
hazards.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/texture_barrier.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.NV.texture_barrier import *
### END AUTOGENERATED SECTION |
from .preprocess import (
read_raw_data,
read_processed_data,
preprocess_data,
get_label,
get_featues,
)
|
def count_primes2(num):
    """Count the primes in the inclusive range [2, num].

    Trial-divides each odd candidate by the primes found so far, stopping as
    soon as a divisor exceeds sqrt(x) -- a composite x always has a prime
    factor <= sqrt(x), so scanning further is wasted work.

    Parameters:
        num (int): upper limit (inclusive).
    Returns:
        int: how many primes are <= num (0 when num < 2).
    """
    if num < 2:
        return 0
    primes = [2]
    x = 3
    while x <= num:
        for y in primes:  # use the primes list!
            if y * y > x:
                # No prime <= sqrt(x) divides x, so x is prime.
                primes.append(x)
                break
            if x % y == 0:
                break  # composite
        else:
            # Defensive: primes list exhausted without a verdict.
            primes.append(x)
        x += 2  # even numbers > 2 are never prime
    print(primes)
    return len(primes)
if __name__ == "__main__":
    # Guard so importing this module doesn't block on input().
    num = int(input("Enter limit: "))
    print(count_primes2(num))
|
############### Stats #################
# Thresholds used when gathering per-read/per-site statistics.
stats_params = {
    # Read
    "fragment_length" : 650,
    "mapping_quality" : 20,
    "base_quality" : 20,
    # ALT
    "dp_limit" : 5,
    "alt_ratio_limit" : 0.2,
    "sample_vote_limit" : 2,
    "vote_ratio_limit" : 0.9,
    "snp_read_limit" : 1,
    # Indel
    "indel_ratio" : 0.05,
    # Bulk
    "bulk_ref_limit" : 1, #this requires the remaining percent to be only a1
    "acceptable_bases" : ['A', 'C', 'G', 'T'],
}
############### Analyze #################
# Thresholds used by the downstream analysis/voting stage.
analyze_params = {
    "dp_tuple_limit" : 5,
    "snp_total_vote" : 0.9,
    "snp_vote_ratio" : 0.9,
    "tuples_ratio" : 0.9,
    "tuples_internal_ratio" : 0.1,
    "tuples_c2_external_error_ratio" : 0.1, # 1-tuples_ratio
    "c3_a1_limit" : 2,
    "c3_homo_limit" : 2,
    "homo_error_allowed" : 0,
    "tuple_group_ratio" : 0.01,
    "win_internal_group_ratio" : 0.1,
    "sample_tuple_vote_limit" : 2,
    "vote_tuple_ratio_limit" : 0.9,
    "conflicting_upper_limit" : 0,
    "c3_conflicting_upper_limit" : 0,
    "a1_lower_limit" : 2,
    "bulk_dp_interval" : (15,65), # Must have the following format (min, max)
}
################ Misc ##################
# Distance/count limits for mutation and SNP filtering.
misc_params = {
    "mut_nr_limit" : 1,
    "mut_dist_limit" : 1000,
    "snp_nr_limit" : 10,
    "snp_dist_limit" : 1000,
}
# Sample groupings and per-tree vote limits.
# NOTE(review): 'tree1' and 'all' are currently identical -- confirm that
# this duplication is intentional.
trees = {
    'tree1':{
        'samples':['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15'],
        'params': { 'MIN_HET': 2, 'MIN_HOM': 1, 'MAX_HET': 14, 'MAX_HOM': 13}
    },
    'all':{
        'samples':['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15'],
        'params': { 'MIN_HET': 2, 'MIN_HOM': 1, 'MAX_HET': 14, 'MAX_HOM': 13}
    }
}
|
from collections import Counter
from collections import defaultdict
from nltk.corpus import stopwords
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from preprocess import process_text_data
from utils import load_epub
def get_word_frequency_information(book):
    """Count non-stopword word frequencies per chapter.

    book: mapping of chapter -> list of sentence strings.
    Returns a DataFrame with columns [chapter, word, frequency]; also writes
    it to ./output/dataframes/q2-word_frequency_information.csv.
    """
    book_bow = defaultdict(list)
    # set(): O(1) membership tests instead of scanning a list for every word.
    stopword = set(stopwords.words('english'))
    for chapter, text in book.items():
        for sentence in text:
            for word in sentence.split():
                if word in stopword:
                    continue
                book_bow[chapter].append(word)
    chapters = list()
    words = list()
    frequencies = list()
    for chapter, bow in book_bow.items():
        counter = Counter(bow)
        for word, frequency in counter.most_common():
            chapters.append(chapter)
            words.append(word)
            frequencies.append(frequency)
    word_frequency_information = pd.DataFrame(
        zip(
            chapters,
            words,
            frequencies
        ),
        columns=['chapter', 'word', 'frequency']
    )
    word_frequency_information.to_csv(
        './output/dataframes/q2-word_frequency_information.csv',
        index=False, encoding='utf-8'
    )
    return word_frequency_information
def get_top_word_frequency_information(word_freq_info, topn=100):
    """Aggregate per-chapter frequencies into whole-book totals and keep the
    ``topn`` most frequent words; also writes the table to CSV.
    """
    unique_words = word_freq_info.word.unique()
    totals = word_freq_info.groupby(by='word').sum()
    counts = totals.loc[unique_words, 'frequency'].values
    top_word_freq_info = pd.DataFrame(
        zip(unique_words, counts),
        columns=['word', 'frequency']
    )
    top_word_freq_info = top_word_freq_info.sort_values(
        by='frequency', ascending=False, ignore_index=True
    ).head(topn)
    top_word_freq_info.to_csv(
        './output/dataframes/q2-top_word_frequency_information.csv',
        index=False, encoding='utf-8'
    )
    return top_word_freq_info
def draw_barplot_top_word_frequency_information(top_word_freq_info, badwords):
    """Bar plot of top word frequencies with ``badwords`` highlighted in red;
    saves the figure to ./output/figures/q2-top_word_frequency.png.
    """
    is_badword = top_word_freq_info.word.isin(badwords)
    badword_index = top_word_freq_info[is_badword].index
    non_badword_index = top_word_freq_info[~is_badword].index
    plt.figure(figsize=(12, 9))
    plt.bar(
        x=badword_index,
        height=top_word_freq_info.loc[badword_index, 'frequency'],
        color='r', label='badwords{goddam, hell, damn, bastard}'
    )
    plt.bar(
        x=non_badword_index,
        height=top_word_freq_info.loc[non_badword_index, 'frequency'],
        color='b'
    )
    plt.legend()
    plt.xticks([])
    plt.xlabel('word')
    plt.ylabel('frequency')
    plt.title('Top Word Frequency Distribution. (feat. badwords)')
    plt.savefig('./output/figures/q2-top_word_frequency.png')
def draw_wordcloud_top_word_frequency_information(top_word_freq_info):
    """Render the top word frequencies as a word cloud and save the image."""
    word_freq = dict(zip(
        top_word_freq_info['word'], top_word_freq_info['frequency']
    ))
    wc = WordCloud(
        width=800,
        height=800,
        background_color='white',
        max_words=1000,
        contour_width=3,
        contour_color='firebrick',
        random_state=2020
    ).generate_from_frequencies(word_freq)
    plt.figure(figsize=(12, 12))
    plt.imshow(wc)
    plt.xticks([])
    plt.yticks([])
    plt.axis('off')
    plt.title('WordCloud for Top Words in Novel')
    plt.savefig('./output/figures/q2-top_word_frequency_wordcloud.png')
if __name__ == "__main__":
epub_path = \
'./ebook/J. D. Salinger - The Catcher in the Rye '\
'(1951, Penguin Books Ltd).epub'
book = load_epub(epub_path)
book = process_text_data(book)
word_freq_info = get_word_frequency_information(book)
top_word_freq_info = get_top_word_frequency_information(
word_freq_info, topn=200
)
badwords = ['goddam', 'hell', 'damn', 'bastard']
draw_barplot_top_word_frequency_information(top_word_freq_info, badwords)
draw_wordcloud_top_word_frequency_information(top_word_freq_info)
|
# IPython log file
# cd ~/projects/play
# make sure cache.py is around
get_ipython().run_line_magic('run', '-i cache.py')
_correlate_sparse_offsets.inspect_types()
_correlate_sparse_offsets.inspect_types(pretty=True)
get_ipython().run_line_magic('run', '-i cache.py')
get_ipython().run_line_magic('pinfo', '_correlate_sparse_offsets.inspect_types')
_correlate_sparse_offsets.inspect_types()
import numpy as np
from numba import jit
def find_instr(func, keyword, sig=0, limit=5):
    """Print up to ``limit`` assembly lines containing ``keyword`` from a
    numba-compiled function.

    func    -- numba dispatcher (must already be compiled)
    keyword -- substring to look for (e.g. 'subp' for packed SSE subtract)
    sig     -- index into func.signatures selecting which compilation to view
    limit   -- maximum number of matching lines to print
    """
    matches = 0
    asm = func.inspect_asm(func.signatures[sig])
    for line in asm.split('\n'):
        if keyword not in line:
            continue
        matches += 1
        print(line)
        if matches >= limit:
            break
    if matches == 0:
        print('No instructions found')
@jit(nopython=True)
def sqdiff(x, y):
    # Elementwise (x - y)**2, written as an explicit loop so numba compiles
    # it to native code (the session compares float32 vs float64 codegen).
    out = np.empty_like(x)
    for i in range(x.shape[0]):
        out[i] = (x[i] - y[i])**2
    return out
x32 = np.linspace(1, 2, 10000, dtype=np.float32)
y32 = np.linspace(2, 3, 10000, dtype=np.float32)
sqdiff(x32, y32)
x64 = x32.astype(np.float64)
y64 = y32.astype(np.float64)
sqdiff(x64, y64)
sqdiff.signatures
get_ipython().run_line_magic('timeit', 'sqdiff(x32, y32)')
get_ipython().run_line_magic('timeit', 'sqdiff(x64, y64)')
print('float32:')
find_instr(sqdiff, keyword='subp', sig=0)
print('---\nfloat64:')
find_instr(sqdiff, keyword='subp', sig=1)
@jit(nopython=True)
def frac_diff1(x, y):
    # Fractional difference 2(x - y)/(x + y) with numba's default (Python)
    # error model; compare with frac_diff2 below, which uses
    # error_model='numpy' -- the session inspects the asm of both.
    out = np.empty_like(x)
    for i in range(x.shape[0]):
        out[i] = 2 * (x[i] - y[i]) / (x[i] + y[i])
    return out
frac_diff1(x32, y32)
find_instr(frac_diff1, keyword='subp', sig=0)
@jit(nopython=True, error_model='numpy')
def frac_diff2(x, y):
    # Same computation as frac_diff1 but with numpy error semantics
    # (division by zero yields inf/nan instead of raising).
    out = np.empty_like(x)
    for i in range(x.shape[0]):
        out[i] = 2 * (x[i] - y[i]) / (x[i] + y[i])
    return out
frac_diff2(x32, y32)
find_instr(frac_diff2, keyword='subp', sig=0)
frac_diff2(x64, y64)
get_ipython().run_line_magic('timeit', 'frac_diff2(x32, y32)')
get_ipython().run_line_magic('timeit', 'frac_diff2(x64, y64)')
frac_diff2.inspect_types(pretty=True)
@jit(nopython=True, error_model='numpy')
def frac_diff3(x, y):
    # Like frac_diff2, but the constant 2 is cast to the input dtype so
    # float32 inputs are not promoted to float64 in the arithmetic.
    out = np.empty_like(x)
    dt = x.dtype  # Cast the constant using the dtype of the input
    for i in range(x.shape[0]):
        # Could also use np.float32(2) to always use same type, regardless of input
        out[i] = dt.type(2) * (x[i] - y[i]) / (x[i] + y[i])
    return out
frac_diff3(x32, y32)
frac_diff3(x64, y64)
get_ipython().run_line_magic('timeit', 'frac_diff3(x32, y32)')
get_ipython().run_line_magic('timeit', 'frac_diff3(x64, y64)')
SQRT_2PI = np.sqrt(2 * np.pi)  # Gaussian normalization constant
@jit(nopython=True, error_model='numpy', fastmath=True)
def kde(x, means, widths):
    '''Compute value of gaussian kernel density estimate.

    x - location of evaluation
    means - array of kernel means
    widths - array of kernel widths
    '''
    n = means.shape[0]
    acc = 0.
    for i in range(n):
        # Accumulate each kernel's (unnormalized) density at x.
        acc += np.exp( -0.5 * ((x - means[i]) / widths[i])**2 ) / widths[i]
    return acc / SQRT_2PI / n
@jit(nopython=True)
def sqdiff_indirect(x, y, indirection):
    # sqdiff with every load/store routed through an index array -- the
    # session uses this to see how indirection changes the generated asm.
    out = np.empty_like(x)
    for i in range(x.shape[0]):
        out[indirection[i]] = (x[indirection[i]] - y[indirection[i]])**2
    return out
indirection = np.arange(x32.size)
get_ipython().run_line_magic('timeit', 'sqdiff_indirect(x32, y32, indirection)')
get_ipython().run_line_magic('timeit', 'sqdiff_indirect(x64, y64, indirection)')
print('float32:')
find_instr(sqdiff_indirect, keyword='subp', sig=0)
print('---\nfloat64:')
find_instr(sqdiff_indirect, keyword='subp', sig=1)
get_ipython().run_line_magic('run', '-i cache.py')
get_ipython().run_line_magic('run', '-i cache.py')
get_ipython().run_line_magic('run', '-i cache.py')
find_instr(_correlate_sparse_offsets, keyword='subp', sig=0)
get_ipython().run_line_magic('run', '-i cache.py')
get_ipython().run_line_magic('run', '-i cache.py')
find_instr(_correlate_sparse_offsets, keyword='subp', sig=0)
_correlate_sparse_offsets.inspect_types()
get_ipython().run_line_magic('run', '-i cache.py')
get_ipython().run_line_magic('run', '-i cache.py')
find_instr(_correlate_sparse_offsets, keyword='subp', sig=0)
find_instr(_correlate_sparse_offsets, keyword='mulp', sig=0)
find_instr(_correlate_sparse_offsets, keyword='p', sig=0)
find_instr(_correlate_sparse_offsets, keyword='pd', sig=1)
find_instr(_correlate_sparse_offsets, keyword='pd', sig=0)
find_instr(_correlate_sparse_offsets, keyword='pf', sig=1)
find_instr(_correlate_sparse_offsets, keyword='ps', sig=1)
get_ipython().run_line_magic('run', '-i cache.py')
get_ipython().run_line_magic('run', '-i cache.py')
find_instr(_correlate_sparse_offsets, keyword='pd', sig=0)
find_instr(_correlate_sparse_offsets, keyword='ps', sig=1)
_correlate_sparse_offsets.inspect_asm
_correlate_sparse_offsets.inspect_asm()
get_ipython().run_line_magic('pinfo', '_correlate_sparse_offsets.inspect_asm')
_correlate_sparse_offsets.inspect_asm(0)
q
_correlate_sparse_offsets.inspect_asm()[0]
_correlate_sparse_offsets.inspect_asm().keys()
print(list(_correlate_sparse_offsets.inspect_asm().values())[0])
|
# solutions.py
"""Introductory Labs: Intro to Matplotlib. Solutions file."""
import numpy as np
from matplotlib import pyplot as plt
def var_of_means(n):
    """Draw an n-by-n standard-normal matrix, compute each row's mean, and
    return the variance of those n means.

    Inputs:
        n (int): The number of rows and columns in the matrix.
    Returns:
        (float) The variance of the means of each row.
    """
    samples = np.random.randn(n, n)
    row_means = np.mean(samples, axis=1)
    return np.var(row_means)
def prob1():
    """Create an array of the results of var_of_means() with inputs
    n = 100, 200, ..., 1000. Plot and show the resulting array.
    """
    # range(), not Python 2's xrange(), which raises NameError on Python 3.
    y = np.array([var_of_means(n) for n in range(100, 1100, 100)])
    plt.plot(y)
    plt.show()
def prob2():
    """Plot sin(x), cos(x), and arctan(x) on [-2pi, 2pi], sampled finely
    enough to look smooth.
    """
    domain = np.linspace(-2*np.pi, 2*np.pi, 200)
    for curve in (np.sin, np.cos, np.arctan):
        plt.plot(domain, curve(domain))
    plt.show()
def prob3():
    """Plot f(x) = 1/(x-1) on [-2, 6] as two separate branches so the pole
    at x = 1 renders as a discontinuity; thick dashed magenta lines;
    y-axis limited to [-6, 6].
    """
    domain = np.linspace(-2, 6, 200)
    left, right = np.split(domain, [75])
    for branch in (left, right):
        plt.plot(branch, 1/(branch - 1), 'm--', lw=4)
    plt.ylim(-6, 6)
    plt.show()
def prob4():
    """Plot sin(x), sin(2x), 2sin(x), and 2sin(2x) on [0, 2pi] in a 2x2
    grid of subplots, each limited to [0, 2pi]x[-2, 2] and titled, with
    the line colors/styles required by the problem statement.
    """
    x = np.linspace(0, 2*np.pi, 200)
    panels = [
        (np.sin(x),     'g-', "sin(x)"),
        (np.sin(2*x),   'r--', "sin(2x)"),
        (2*np.sin(x),   'b--', "2sin(x)"),
        (2*np.sin(2*x), 'm:', "2sin(2x)"),
    ]
    for position, (values, style, label) in enumerate(panels, start=1):
        plt.subplot(2, 2, position)
        plt.plot(x, values, style, lw=2)
        plt.axis([0, 2*np.pi, -2, 2])
        plt.title(label)
    plt.suptitle("Solution to Problem 4 (subplots)")
    plt.show()
def prob5():
    """Visualize the FARS.npy data: a longitude/latitude scatter with black
    pixel markers, and an hour-of-day histogram with one bin per hour.
    """
    data = np.load("FARS.npy")
    hours, longitudes, latitudes = data[:, 0], data[:, 1], data[:, 2]
    plt.subplot(211)
    plt.plot(longitudes, latitudes, 'k,')
    plt.xlabel("Longitude")
    plt.ylabel("Latitude")
    plt.axis("equal")
    plt.subplot(212)
    plt.hist(hours, bins=24, range=[-.5, 23.5])
    plt.xlim(-.5, 23.5)
    plt.xlabel("Hour (Military Time)")
    plt.suptitle("Solution to Problem 5 (FARS data)")
    plt.show()
def prob6():
    """Draw a heat map and a contour map of f(x,y) = sin(x)sin(y)/(xy) on
    [-2pi, 2pi]^2, both using a non-default colormap with a colorbar.
    """
    ticks = np.linspace(-2*np.pi, 2*np.pi, 200)
    X, Y = np.meshgrid(ticks, np.copy(ticks))
    Z = np.sin(X)*np.sin(Y)/(X*Y)
    bounds = [-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi]
    plt.subplot(121)  # Heat map.
    plt.pcolormesh(X, Y, Z, cmap="Spectral")
    plt.axis(bounds)
    plt.colorbar()
    plt.subplot(122)  # Contour map with 10 level curves.
    plt.contour(X, Y, Z, 10, cmap="Spectral")
    plt.axis(bounds)
    plt.colorbar()
    plt.suptitle("Solution to Problem 6 (meshgrids)")
    plt.show()
|
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None #Turn off SettingWithCopyWarning
import os
import omnitool
import argparse
# Command-line interface: a temperature perturbation plus a load/unload stage.
# NOTE(review): ``default=0.`` on a positional argument has no effect unless
# nargs='?' is set -- the argument is always required as written.
parser = argparse.ArgumentParser(description='Recalculate Bolometric Corrections for a given temperature perturbation')
parser.add_argument('tempdiff', default=0., type=float,
                    help='Perturbation to the temperature values in K')
parser.add_argument('stage', type=str, choices=['load','unload'],
                    help='Load prepares the data for BCcodes. Unload saves it to a location of choice.')
parser.add_argument('-pl', '--perturb_logg', action='store_const', const=True, default=False,
                    help='If true, perturb our value of logg using seismic scaling relations for the perturbed Teff')
parser.add_argument('-r', '--reddening', action='store_const', const=True, default=False,
                    help='If true, include reddening in the interpolation. WARNING: This is *not* required for the Hall+18 work.')
parser.add_argument('-a', '--apokasc', action='store_const', const=True, default=False,
                    help='If true, return a set of BCs calculated for the APOKASC subsample')
args = parser.parse_args()
# Input/output locations under the user's home directory.
__datadir__ = os.path.expanduser('~')+'/PhD/Gaia_Project/data/KepxDR2/'
__bccodes__ = os.path.expanduser('~')+'/PhD/Hacks_and_Mocks/bolometric-corrections/BCcodes/'
if __name__ == '__main__':
    if args.stage == 'load':
        # Choose the input catalogue.
        if args.apokasc:
            cdf = pd.read_csv(__datadir__+'rcxyuxapokasc2.csv')
        else:
            cdf = pd.read_csv(__datadir__+'rcxyu18_pre_elsworth.csv')
        # .copy() so the column assignments below write to an independent
        # frame rather than a potential view of cdf.
        out = cdf[['KICID','[Fe/H]']].copy()  # Load in fixed values
        out['Teff'] = cdf['Teff'] + args.tempdiff  # Add temperature perturbation
        if not args.perturb_logg:
            out['logg'] = cdf['logg']
        else:
            # Recompute logg from seismic scaling relations at the new Teff.
            sc = omnitool.scalings(cdf.numax, cdf.dnu, out.Teff)
            out['logg'] = sc.get_logg()
        if not args.reddening:
            out['Ebv'] = np.zeros(len(out))
        else:
            out['Ebv'] = cdf['Ebv']
        # Column order matching the BCcodes input file format.
        out = out[['KICID','logg','[Fe/H]','Teff','Ebv']]
        out.to_csv(__bccodes__+'input.sample.all', sep='\t', header=False, index=False,)
        print('Data loaded for Temperature perturbation of: '+str(args.tempdiff))

    if args.stage == 'unload':
        # Raw string: '\s+' in a plain literal is an invalid escape sequence.
        bcall = pd.read_csv(__bccodes__+'output.file.all', sep=r'\s+')
        bcall.rename(columns={'ID':'KICID',
                            'BC_1':'BC_J',
                            'BC_2':'BC_H',
                            'BC_3':'BC_K',
                            'BC_4':'BC_GAIA'}, inplace=True)
        bcall.drop(columns=['log(g)','[Fe/H]','Teff','E(B-V)','BC_5'], inplace=True)
        bcall.to_csv(__datadir__+'BCs/casagrande_bcs_'+str(args.tempdiff)+'.csv',index=False)
|
def rel_height(node, node_height):
    """Return the deepest accumulated depth reachable from ``node``, where
    ``node_height`` is the depth already traversed; 0 for an empty subtree.
    """
    if not node:
        return 0
    deepest_left = rel_height(node.left, node_height + 1)
    deepest_right = rel_height(node.right, node_height + 1)
    return max(node_height, deepest_left, deepest_right)
def height(root):
    """Height (in edges) of the tree rooted at ``root``; 0 for an empty tree."""
    return rel_height(root, 0)
"""CLI command for "publish" command."""
import json
import click
import boto3
from botocore.exceptions import ClientError
from serverlessrepo import publish_application
from serverlessrepo.publish import CREATE_APPLICATION
from serverlessrepo.exceptions import ServerlessRepoError
from samcli.cli.main import pass_context, common_options as cli_framework_options, aws_creds_options
from samcli.commands._utils.options import template_common_option
from samcli.commands._utils.template import get_template_data
from samcli.commands.exceptions import UserException
HELP_TEXT = """
Use this command to publish a packaged AWS SAM template to
the AWS Serverless Application Repository to share within your team,
across your organization, or with the community at large.\n
\b
This command expects the template's Metadata section to contain an
AWS::ServerlessRepo::Application section with application metadata
for publishing. For more details on this metadata section, see
https://docs.aws.amazon.com/serverlessrepo/latest/devguide/serverless-app-publishing-applications.html
\b
Examples
--------
To publish an application
$ sam publish -t packaged.yaml --region <region>
"""
SHORT_HELP = "Publish a packaged AWS SAM template to the AWS Serverless Application Repository."
SERVERLESSREPO_CONSOLE_URL = "https://console.aws.amazon.com/serverlessrepo/home?region={}#/published-applications/{}"
@click.command("publish", help=HELP_TEXT, short_help=SHORT_HELP)
@template_common_option
@aws_creds_options
@cli_framework_options
@pass_context
def cli(ctx, template):
# All logic must be implemented in the ``do_cli`` method. This helps with easy unit testing
do_cli(ctx, template) # pragma: no cover
def do_cli(ctx, template):
    """Publish the application based on command line inputs.

    Raises UserException on template or publish errors; ClientError is
    either wrapped (invalid S3 URI) or re-raised as-is.
    """
    try:
        template_data = get_template_data(template)
    except ValueError as ex:
        click.secho("Publish Failed", fg='red')
        # Chain the cause so the original parse error stays in the traceback.
        raise UserException(str(ex)) from ex

    try:
        publish_output = publish_application(template_data)
        click.secho("Publish Succeeded", fg="green")
        click.secho(_gen_success_message(publish_output), fg="yellow")
    except ServerlessRepoError as ex:
        click.secho("Publish Failed", fg='red')
        raise UserException(str(ex)) from ex
    except ClientError as ex:
        click.secho("Publish Failed", fg='red')
        raise _wrap_s3_uri_exception(ex) from ex

    application_id = publish_output.get('application_id')
    _print_console_link(ctx.region, application_id)
def _gen_success_message(publish_output):
    """
    Generate detailed success message for published applications.

    Parameters
    ----------
    publish_output : dict
        Output from serverlessrepo publish_application

    Returns
    -------
    str
        Detailed success message
    """
    app_id = publish_output.get('application_id')
    details = json.dumps(publish_output.get('details'), indent=2)
    newly_created = CREATE_APPLICATION in publish_output.get('actions')
    if newly_created:
        return "Created new application with the following metadata:\n{}".format(details)
    return 'The following metadata of application "{}" has been updated:\n{}'.format(app_id, details)
def _print_console_link(region, application_id):
    """
    Print link for the application in AWS Serverless Application Repository console.

    Parameters
    ----------
    region : str
        AWS region name; falls back to the default boto3 session region
    application_id : str
        The Amazon Resource Name (ARN) of the application
    """
    region = region or boto3.Session().region_name
    # '/' is not valid in the console URL fragment; the console uses '~'.
    console_link = SERVERLESSREPO_CONSOLE_URL.format(region, application_id.replace('/', '~'))
    msg = "Click the link below to view your application in AWS console:\n{}".format(console_link)
    click.secho(msg, fg="yellow")
def _wrap_s3_uri_exception(ex):
"""
Wrap invalid S3 URI exception with a better error message.
Parameters
----------
ex : ClientError
boto3 exception
Returns
-------
Exception
UserException if found invalid S3 URI or ClientError
"""
error_code = ex.response.get('Error').get('Code')
message = ex.response.get('Error').get('Message')
if error_code == 'BadRequestException' and "Invalid S3 URI" in message:
return UserException(
"Your SAM template contains invalid S3 URIs. Please make sure that you have uploaded application "
"artifacts to S3 by packaging the template: 'sam package --template-file <file-path>'.")
return ex
|
# Helper module used by the signed-off-by checker and email address
# checker.
#
# This module handles all the common code stuff between the various
# checkers.
import os
import sys
import logging
import argparse
from git import Repo
def run(checker_name, per_commit_callback, result_messages):
    """Drive a per-commit PR check over the commits unique to a PR branch.

    checker_name        -- human-readable name used in log output
    per_commit_callback -- callable(commit, results) that increments
                           results['good'] / results['bad'] per commit
    result_messages     -- dict with 'all good' / 'some good' / 'none good'
                           entries, each a {'message', 'status'} dict

    Writes the GitHub status message to --status-msg-file and terminates the
    process with the chosen exit status.
    """
    logging.basicConfig(level=logging.INFO, stream=sys.stderr)
    # Lazy %-style args: logging interpolates only if the record is emitted.
    logging.info("%s starting", checker_name)
    argparser = argparse.ArgumentParser(description='Per-commit PR checker')
    argparser.add_argument('--status-msg-file',
                           help='File in which to print the GitHub status message',
                           type=str, required=True)
    argparser.add_argument('--gitdir', help='Git directory', type=str,
                           required=True)
    argparser.add_argument('--base-branch', help='Merge base branch name',
                           type=str, required=True)
    argparser.add_argument('--pr-branch', help='PR branch name',
                           type=str, required=True)
    args = argparser.parse_args()
    args_dict = vars(args)
    base_branch = args_dict['base_branch']
    pr_branch = args_dict['pr_branch']
    clone_dir = args_dict['gitdir']
    logging.info("Git clone: %s", clone_dir)
    logging.info("PR branch: %s", pr_branch)
    logging.info("Base branch: %s", base_branch)
    #--------------------------------------
    # Make a python object representing the Git repo
    repo = Repo(clone_dir)
    merge_base = repo.commit(base_branch)
    logging.info("Merge base: %s", merge_base.hexsha)
    #--------------------------------------
    # Iterate from the HEAD of the PR branch down to the merge base with
    # the base branch.
    results = {
        'good' : 0,
        'bad' : 0,
    }
    for commit in repo.iter_commits(repo.commit(pr_branch)):
        if commit.binsha == merge_base.binsha:
            logging.info("Found the merge base %s: we're done", commit.hexsha)
            break
        per_commit_callback(commit, results)
    #--------------------------------------
    # Analyze what happened
    if results['good'] == 0 and results['bad'] == 0:
        msg = 'No commits -- nothing to do'
        status = 0
    elif results['good'] > 0 and results['bad'] == 0:
        msg = result_messages['all good']['message']
        status = result_messages['all good']['status']
    elif results['good'] > 0 and results['bad'] > 0:
        msg = result_messages['some good']['message']
        status = result_messages['some good']['status']
    else:
        msg = result_messages['none good']['message']
        status = result_messages['none good']['status']
    print(msg)
    with open(args_dict['status_msg_file'], 'w') as writer:
        writer.write(msg)
    # sys.exit: the bare `exit` builtin is provided by the site module and
    # is not guaranteed to exist in all execution environments.
    sys.exit(status)
|
import pytorch_lightning as pl
import torch
from xmuda.models.modules import Net2DFeat, Net3DFeat, FuseNet
from xmuda.models.LMSCNet import LMSCNet
from xmuda.common.utils.metrics import Metrics
import pickle
import numpy as np
import time
import os.path as osp
class RecNetLMSC(pl.LightningModule):
def __init__(self, preprocess_dir):
super().__init__()
self.class_frequencies = np.array([5.41773033e+09, 1.57835390e+07, 1.25136000e+05, 1.18809000e+05,
6.46799000e+05, 8.21951000e+05, 2.62978000e+05, 2.83696000e+05,
2.04750000e+05, 6.16887030e+07, 4.50296100e+06, 4.48836500e+07,
2.26992300e+06, 5.68402180e+07, 1.57196520e+07, 1.58442623e+08,
2.06162300e+06, 3.69705220e+07, 1.15198800e+06, 3.34146000e+05])
self.class_num = 20
self.lmscnet = LMSCNet(
class_num=self.class_num,
class_frequencies=self.class_frequencies)
with open(osp.join(preprocess_dir, "visible_voxels.pkl"), 'rb') as f:
self.invisible_voxels = pickle.load(f)
self.train_metrics = Metrics(self.class_num)
self.val_metrics = Metrics(self.class_num)
self.train_metrics_visible = Metrics(self.class_num)
self.val_metrics_visible = Metrics(self.class_num)
# tensorboard = self.logger.experiment
def forward(self, batch):
occupancy = batch['voxel_occupancy'].cuda()
# n_points_3d = batch['n_points_3d']
# img = batch['img']
# bs = img.shape[0]
# coords_3d = batch['coords_3d']
# occupancy = torch.zeros(bs, 256, 256, 32, device=self.device)
# # print(coords_3d.shape)
# # prev = 0
# for i in range(bs):
# idx = coords_3d[:, 3] == i
# b_coords = coords_3d[idx]
# occupancy[i, b_coords[:, 0], b_coords[:, 1], b_coords[:, 2]] = 1.0
# # prev = n_point
# occupancy = occupancy.transpose(2, 3)
out = self.lmscnet(occupancy)
return out
def training_step(self, batch, batch_idx):
pred = self(batch)
target = batch['ssc_label_1_4'].cuda()
loss = self.lmscnet.compute_loss(pred, target)
self.train_metrics.add_batch(prediction=pred, target=target)
self.train_metrics_visible.add_batch(prediction=pred,
target=target,
scenes=batch['scene'],
invisible_data_dict=self.invisible_voxels)
self.log('train/loss', loss.item())
self.log('train/loss_ssc', loss.item())
for metrics, suffix in [(self.train_metrics, ""), (self.train_metrics_visible, "_visible")]:
self.log("train/mIoU" + suffix,
metrics.get_semantics_mIoU().item())
self.log("train/IoU" + suffix, metrics.get_occupancy_IoU().item())
self.log("train/Precision" + suffix,
metrics.get_occupancy_Precision().item())
self.log("train/Recall" + suffix,
metrics.get_occupancy_Recall().item())
self.log("train/F1" + suffix, metrics.get_occupancy_F1().item())
metrics.reset_evaluator()
return loss
def validation_step(self, batch, batch_idx):
pred = self(batch)
target = batch['ssc_label_1_4'].cuda()
loss = self.lmscnet.compute_loss(pred, target)
# loss = self.bce_logits_loss(logits, occ_labels)
self.log('val/loss', loss.item())
self.log('val/loss_ssc', loss.item())
self.val_metrics.add_batch(prediction=pred, target=target)
self.val_metrics_visible.add_batch(prediction=pred,
target=target,
scenes=batch['scene'],
invisible_data_dict=self.invisible_voxels)
# pred_occ_labels = (torch.sigmoid(logits) > 0.5).float()
# acc = (pred_occ_labels == occ_labels).float().mean()
# self.log('train/acc', acc.item())
def validation_epoch_end(self, outputs):
    """Log epoch-level validation metrics and reset both evaluators."""
    for evaluator, tag in ((self.val_metrics, ""), (self.val_metrics_visible, "_visible")):
        self.log(f"val/mIoU{tag}", evaluator.get_semantics_mIoU().item())
        self.log(f"val/IoU{tag}", evaluator.get_occupancy_IoU().item())
        self.log(f"val/Precision{tag}", evaluator.get_occupancy_Precision().item())
        self.log(f"val/Recall{tag}", evaluator.get_occupancy_Recall().item())
        self.log(f"val/F1{tag}", evaluator.get_occupancy_F1().item())
        evaluator.reset_evaluator()
def configure_optimizers(self):
    """Optimize all module parameters with Adam at a fixed 1e-4 learning rate."""
    return torch.optim.Adam(self.parameters(), lr=1e-4)
|
from aiogram import Dispatcher
from app.handlers.private import default, start, help_
def setup(dp: Dispatcher):
    """Register every private-chat handler module on the dispatcher.

    Registration order matters for handler resolution, so the original
    (start, default, help_) order is preserved.
    """
    handler_modules = (start, default, help_)
    for handler_module in handler_modules:
        handler_module.setup(dp)
|
# -*- coding:utf-8 -*-
from beryllium.field import FieldList
from beryllium.tabsetup import TabSetup
from beryllium.listcssselector import ListCssSelector
from beryllium.mongodb import Mongodb
class Page(object):
    """Configuration object describing how one page is scraped and persisted."""

    def __init__(self, field_list: "FieldList", name="", is_save=False, mongodb=None,
                 list_css_selector=None, tab_setup=None, x_offset=0, y_offset=0):
        """
        :param field_list: fields to extract from the page
        :param name: page name
        :param is_save: whether scraped data is saved to MongoDB
        :param mongodb: MongoDB configuration; defaults to a fresh Mongodb()
        :param list_css_selector: list-container selector; defaults to a fresh ListCssSelector()
        :param tab_setup: tab handling configuration; defaults to a fresh TabSetup()
        :param x_offset: used on mobile pages when calling driver.move_to_element
        :param y_offset: same purpose as x_offset
        """
        self.field_list = field_list
        self.name = name
        self.is_save = is_save
        # Fix: the previous signature used mutable default arguments
        # (mongodb=Mongodb(), ...) so every Page built with defaults shared
        # the same Mongodb/ListCssSelector/TabSetup instance. Each Page now
        # gets its own fresh instance.
        self.mongodb = mongodb if mongodb is not None else Mongodb()
        self.list_css_selector = (list_css_selector if list_css_selector is not None
                                  else ListCssSelector())
        self.tab_setup = tab_setup if tab_setup is not None else TabSetup()
        self.x_offset = x_offset
        self.y_offset = y_offset

    def __str__(self):
        # A page without a name or field list renders as "None".
        if not self.name or self.field_list is None:
            return str(None)
        else:
            result = {"name": self.name, "is_save": self.is_save}
            if self.field_list is not None:
                result.setdefault("field_list", str(self.field_list))
            if self.is_save:
                result.setdefault("mongodb", str(self.mongodb))
            if self.list_css_selector is not None:
                result.setdefault('list_css_selector', str(self.list_css_selector))
            if self.tab_setup is not None:
                result.setdefault("tab_setup", str(self.tab_setup))
            result.setdefault("x_offset", self.x_offset)
            result.setdefault("y_offset", self.y_offset)
            return str(result).replace("\\", "")

    def __eq__(self, other):
        # An "empty" Page (no name or no field list) compares equal to None.
        if other is None:
            return not self.name or self.field_list is None
        # Fix: the original called ``super.__eq__(self, other)`` (the class
        # object, not super()) and never returned the result, so unequal
        # pages compared as None. Attribute-wise equality is used instead.
        if isinstance(other, Page):
            return vars(other) == vars(self)
        return NotImplemented

    def __iter__(self):
        # NOTE(review): returns self but no __next__ is defined, so iterating
        # a Page raises TypeError on the first next() call -- confirm whether
        # iteration over Page is actually used anywhere.
        return self

    def set_field_list(self, field_list):
        """Replace the page's field list."""
        self.field_list = field_list
class PageGroup(object):
    """An iterable, ordered collection of Page objects."""

    def __init__(self, *args: "Page"):
        # Keep both the one-shot iterator (for __next__) and the tuple
        # (for __str__ / emptiness checks).
        self.iter = iter(args)
        self.tuple = args

    def __iter__(self):
        return self

    def __next__(self):
        # Fix: the original used ``for i in self.iter: return i`` which
        # returned None forever once the iterator was exhausted, making
        # ``for page in group`` loop endlessly. Delegating to next() lets
        # StopIteration propagate naturally.
        return next(self.iter)

    def __str__(self):
        return "(%s)" % ",".join([str(i) for i in self.tuple])

    def __eq__(self, other):
        # An empty group compares equal to None or [].
        if other is None or other == []:
            # Fix: ``not self`` on a plain object is always False; test the
            # stored tuple instead so an empty group really equals None/[].
            return not self.tuple
        return NotImplemented
class PageFunc(object):
    """Wraps a callable together with the keyword arguments to invoke it with."""

    def __init__(self, func=None, **kwargs):
        self.func = func
        self.kwargs = kwargs

    def set_kwargs(self, **kwargs):
        """Replace the stored keyword arguments."""
        self.kwargs = kwargs

    def run(self):
        """Invoke the stored callable, or report when none was provided."""
        if not self.func:
            print("func为空!!!")
        else:
            self.func(**self.kwargs)
class NextPageCssSelectorSetup(object):
    """Pagination configuration driven by clicking a CSS-selector 'next' element."""

    def __init__(self, css_selector: str, page: "Page", stop_css_selector="", ele_timeout=1, pause_time=1, is_next=True,
                 is_proxy=True, pre_page_func=None, main_page_func=None, after_page_func=None):
        """
        :param css_selector: selector of the 'next page' element to click
        :param page: the Page scraped on each iteration
        :param stop_css_selector: selector whose presence stops pagination
        :param ele_timeout: seconds to wait for the element
        :param pause_time: seconds to pause between pages
        :param is_next: whether to keep advancing to the next page
        :param is_proxy: whether to route through a proxy
        :param pre_page_func: hook run before each page (defaults to a fresh PageFunc())
        :param main_page_func: hook run on each page (defaults to a fresh PageFunc())
        :param after_page_func: hook run after each page (defaults to a fresh PageFunc())
        """
        self.css_selector = css_selector
        self.stop_css_selector = stop_css_selector
        self.ele_timeout = ele_timeout
        self.pause_time = pause_time
        self.is_next = is_next
        self.is_proxy = is_proxy
        self.page = page
        # Fix: defaults were ``pre_page_func=PageFunc()`` etc. -- mutable
        # default arguments shared one PageFunc across every setup object,
        # so set_kwargs() on one leaked into all others. Each instance now
        # gets its own hooks.
        self.pre_page_func = pre_page_func if pre_page_func is not None else PageFunc()
        self.main_page_func = main_page_func if main_page_func is not None else PageFunc()
        self.after_page_func = after_page_func if after_page_func is not None else PageFunc()

    def set_main_page_func(self, page_func: "PageFunc"):
        """Replace the per-page hook."""
        self.main_page_func = page_func
class NextPageLinkTextSetup(object):
    """Pagination configuration driven by clicking a link with given text."""

    def __init__(self, link_text: str, page: "Page", ele_timeout=1, pause_time=1, is_next=True, is_proxy=True,
                 pre_page_func=None, main_page_func=None, after_page_func=None):
        """
        :param link_text: visible text of the 'next page' link to click
        :param page: the Page scraped on each iteration
        :param ele_timeout: seconds to wait for the element
        :param pause_time: seconds to pause between pages
        :param is_next: whether to keep advancing to the next page
        :param is_proxy: whether to route through a proxy
        :param pre_page_func: hook run before each page (defaults to a fresh PageFunc())
        :param main_page_func: hook run on each page (defaults to a fresh PageFunc())
        :param after_page_func: hook run after each page (defaults to a fresh PageFunc())
        """
        self.link_text = link_text
        self.ele_timeout = ele_timeout
        self.pause_time = pause_time
        self.is_next = is_next
        self.is_proxy = is_proxy
        self.page = page
        # Fix: same mutable-default-argument hazard as
        # NextPageCssSelectorSetup -- hooks are now created per instance.
        self.pre_page_func = pre_page_func if pre_page_func is not None else PageFunc()
        self.main_page_func = main_page_func if main_page_func is not None else PageFunc()
        self.after_page_func = after_page_func if after_page_func is not None else PageFunc()

    def set_main_page_func(self, page_func: "PageFunc"):
        """Replace the per-page hook."""
        self.main_page_func = page_func
|
"""
The Monty Hall Problem [http://en.wikipedia.org/wiki/Monty_Hall_problem] is a probability brain teaser that has a
rather unintuitive solution.
The gist of it, taken from Wikipedia:
Suppose you're on a game show, and you're given the choice of three doors: Behind one door is a car; behind the others,
goats. You pick a door, say No. 1 [but the door is not opened], and the host, who knows what's behind the doors, opens
another door, say No. 3, which has a goat. He then says to you, "Do you want to pick door No. 2?" Is it to your
advantage to switch your choice? (clarification: the host will always reveal a goat)
Your task is to write a function that will compare the strategies of switching and not switching over many random
position iterations. Your program should output the proportion of successful choices by each strategy. Assume that if
both unpicked doors contain goats the host will open one of those doors at random with equal probability.
If you want to, you can for simplicity's sake assume that the player picks the first door every time. The only aspect
of this scenario that needs to vary is what is behind each door.
Thanks to SleepyTurtle for posting this idea at /r/dailyprogrammer_ideas! Do you have a problem you think would be good
for us? Head on over there and post it!
"""
import numpy as np
import numpy.random as nprnd
import time
TRIALS = 1000000  # number of simulated games per strategy comparison
doors = [1, 0, 0]  # 1 = car, 0 = goat; the player always "picks" index 0
start_time = time.time()
keep = 0  # wins when the player keeps the original door
switch = 0  # wins when the player switches doors
for _ in range(TRIALS):
    """ permutes doors to create random combination of doors and then runs the keep and switch scenarios simultaneously.
    in the case of initially selecting the correct door, the choice of removing one of the other doors does not matter,
    so it just chooses the first one.
    """
    perm = nprnd.permutation(doors)
    keep += perm[0]  # 1 exactly when the kept (first) door hides the car
    # Host reveals the first goat among the two unpicked doors.
    zero = np.where(perm[1:] == 0)[0]
    perm = np.delete(perm, zero[0]+1)
    switch += perm[1]  # the single remaining unpicked door
print('Kept: {}%'.format(100*keep/TRIALS))
print('Switched: {}%'.format(100*switch/TRIALS))
print('Elapsed time: {}'.format(time.time()-start_time))
start_time = time.time()
keep = 0
switch = 0
for _ in range(TRIALS):
    """ optimization so that it doesn't actually simulate the monty hall problem. instead it is based on the realization
    that if the permutation's first door is correct, the keep scenario always wins and if the first is incorrect the
    switch scenario always wins. Time reduced to less than half of the above method.
    """
    perm = nprnd.permutation(doors)
    if perm[0]:
        keep += 1
    else:
        switch += 1
print('Kept: {}%'.format(100*keep/TRIALS))
print('Switched: {}%'.format(100*switch/TRIALS))
print('Elapsed time: {}'.format(time.time()-start_time))
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
from email_phone_user import version
def read(fname):
    """Return the contents of *fname* (relative to this file), or "" if unreadable.

    Used to feed README.rst into setup()'s long_description without failing
    when the file is absent (e.g. in some sdist/CI layouts).
    """
    try:
        # Fix: use a context manager so the file handle is closed promptly;
        # the previous version left it open until garbage collection.
        with open(os.path.join(os.path.dirname(__file__), fname)) as f:
            return f.read()
    except IOError:
        return ""
# Packaging metadata. `version` is imported from the package itself so the
# PyPI release always matches email_phone_user.version; long_description
# degrades to "" when README.rst is missing (see read() above).
setup(
    name="django-email-phone-user",
    version=version,
    packages=find_packages(),
    test_suite="nose.collector",
    tests_require=["nose", "mock"],
    # metadata for upload to PyPI
    author="168 Estate Limited",
    author_email="info@168.estate",
    url="https://github.com/168estate/django-email-phone-user",
    description="Django Custom User model with email or phone as username",
    long_description=read('README.rst'),
    # Full list:
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.3",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    license="MIT",
)
|
"""It's not needed to include <!-- mdpo-include-codeblock --> command in
Markdown files, only if code block contents are included in PO files will be
translated directly.
"""
from mdpo.po2md import pofile_to_markdown
def test_include_indented_codeblock(tmp_file):
    """po2md translates indented code-block text when it appears in the PO file."""
    markdown_input = '''
var hello = "world";
var this;
This must be translated.
var thisCodeMustNotBeEdited = undefined;
'''
    markdown_output = ''' var hola = "mundo";
var esto;
Esto debe ser traducido.
var thisCodeMustNotBeEdited = undefined;
'''
    pofile_content = '''#
msgid ""
msgstr ""
msgid ""
"var hello = \\"world\\";\\n"
"var this;\\n"
msgstr ""
"var hola = \\"mundo\\";\\n"
"var esto;\\n"
msgid "This must be translated."
msgstr "Esto debe ser traducido."
'''
    with tmp_file(pofile_content, '.po') as po_filepath:
        output = pofile_to_markdown(markdown_input, po_filepath)
        assert output == markdown_output
def test_include_fenced_codeblock(tmp_file):
    """po2md translates fenced code-block text when it appears in the PO file."""
    markdown_input = '''```javascript
var hello = "world";
var this;
```
This must be translated.
```javascript
var thisCodeMustNotBeEdited = undefined;
```
'''
    markdown_output = '''```javascript
var hola = "mundo";
var esto;
```
Esto debe ser traducido.
```javascript
var thisCodeMustNotBeEdited = undefined;
```
'''
    pofile_content = '''#
msgid ""
msgstr ""
msgid ""
"var hello = \\"world\\";\\n"
"var this;\\n"
msgstr ""
"var hola = \\"mundo\\";\\n"
"var esto;\\n"
msgid "This must be translated."
msgstr "Esto debe ser traducido."
'''
    with tmp_file(pofile_content, '.po') as po_filepath:
        output = pofile_to_markdown(markdown_input, po_filepath)
        assert output == markdown_output
|
from collections import defaultdict
from predict import Predictor
import pandas as pd
import numpy as np
from loaddata import Data_Reader
from IPython import embed
import baostock as bs
import numpy as np
def download_data(code, main):
    """Fetch daily k-line history for one stock from baostock.

    :param code: stock code such as ``sh.600000``
    :param main: when True also fetch volume (used for the main stock)
    :return: pandas DataFrame returned by baostock
    """
    # The main stock needs volume as an extra feature; related stocks
    # contribute only their closing price.
    fields = "date,close,volume" if main else "date,close"
    rs_result = bs.query_history_k_data_plus(code, fields,
                                             start_date='2017-01-01',
                                             end_date='2021-12-2',
                                             frequency="d", adjustflag="3")
    return rs_result.get_data()
# Load the raw price data and the pre-computed Granger-causality results.
df = pd.read_csv("data.csv")
res = pd.read_csv("causality.csv")
pdt = Predictor(df, res)
# Map each main stock ("stockN") to the indices of stocks that predict it.
prediction = defaultdict(list)
stocks = pdt.stocks_to_predict()
for compares, (lags, _) in stocks.items():
    #print(compares)
    n2, n1 = map(int, compares.split(","))
    prediction["stock" + str(n2)].append(n1)
print(prediction)
reader = Data_Reader()
reader.read_data()
stock_list = reader.code_list
res_list = []
# Resolve stock indices to exchange codes for download.
# NOTE(review): `mainstock` is overwritten on every loop iteration, so only
# the last entry of `stocks` is used below -- confirm this is intended.
for compares, (lags, _) in stocks.items():
    n2, n1 = map(int, compares.split(","))
    print("current mainstock is : ", "stock" + str(n2), "code is ", stock_list[n2 - 1])
    mainstock = stock_list[n2 - 1]
    for n1 in prediction["stock" + str(n2)]:
        print("adding", n1)
        res_list.append(stock_list[n1 - 1])
print(res_list)
output_list = []
lg = bs.login()
# Main stock contributes close and volume; related stocks close only.
m = download_data(mainstock, True)
close = list(map(float, list(m["close"])))
volume = list(map(float, list(m["volume"])))
output_list.append(close)
output_list.append(volume)
for j in res_list:
    n = download_data(j, False)
    # print(len(m), len(n))
    # embed()
    # Only keep series whose trading history aligns with the main stock.
    if len(n) == len(m):
        close = list(map(float, list(n["close"])))
        output_list.append(close)
        print(j, "added")
bs.logout()
# Normalise each series by its own mean so different price scales become
# comparable.
for i in range(len(output_list)):
    mean = sum(output_list[i]) / len(output_list[i])
    for j in range(len(output_list[i])):
        output_list[i][j] = output_list[i][j] / mean
# First-order differencing (in place), dropping the now-meaningless last
# element of each series.
for i in range(len(output_list)):
    for j in range(len(output_list[i]) - 1):
        output_list[i][j] = output_list[i][j + 1] - output_list[i][j]
    output_list[i].pop(-1)
data = pd.DataFrame(
    {"close": output_list[0], "volume": output_list[1]})
for i in range(2, len(output_list)):
    data["stock"+str(i)] = output_list[i]
data.to_csv("grangerdata.csv", index=False)
# Fix: removed a stray module-level ``break`` that followed the CSV write;
# ``break`` outside a loop is a SyntaxError.
|
from ariadne.contrib.django.views import GraphQLView
from django.contrib import admin
from django.urls import path
from example_project.api.graphql_config import schema
# Route table: Django admin plus a single GraphQL endpoint served by Ariadne.
urlpatterns = [
    path("admin/", admin.site.urls),
    path("graphql/", GraphQLView.as_view(schema=schema), name="graphql"),
]
|
# -*- coding: utf-8 -*-
"""
Generate plots of exponential decay in the image reconstruction error when
using restarted NESTA.
The setup is to solve a smoothed analysis QCBP problem with a TV-Haar analysis
operator to recover an image from subsampled Fourier measurements.
"""
import math
import nestanet.operators as n_op
import nestanet.sampling as n_sp
import nestanet.nn as n_nn
import torch
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from PIL import Image
### load image
with Image.open("../images/GPLU_phantom_512.png") as im:
    # Normalise 8-bit grayscale pixels to [0, 1].
    X = np.asarray(im).astype(float) / 255
### parameters
# fixed parameters
eta = [1e0, 1e-1, 1e-2, 1e-3] # noise level
sample_rate = 0.15 # sample rate
outer_iters = 15 # num of restarts + 1
r = 1/4 # decay factor
zeta = 1e-9 # CS error parameter
delta = 0.05 # rNSP parameter
lam = 2.5 # TV-Haar parameter
# inferred parameters (mu and inner_iters are defined later)
eps0 = np.linalg.norm(X,'fro') # initial error bound ||X||_F
N, _ = X.shape # image size (assumed to be N by N)
m = sample_rate*N*N # expected number of measurements
### generate sampling mask
# Half the measurement budget: variable-density (inverse square law) mask.
var_hist = n_sp.inverse_square_law_hist_2d(N,1)
var_probs = n_sp.bernoulli_sampling_probs_2d(var_hist,N,m/2)
var_mask = n_sp.generate_sampling_mask_from_probs(var_probs)
num_var_samples = np.sum(var_mask)
# Other half: uniform Bernoulli sampling over the not-yet-sampled locations.
uni_mask_cond = np.random.rand(N*N-num_var_samples) <= (m/2)/(N*N-num_var_samples)
uni_mask = np.zeros((N,N), dtype=bool)
uni_mask[~var_mask] = uni_mask_cond
# logical OR the two masks (disjoint by construction, hence the assert)
mask = uni_mask | var_mask
assert np.sum(mask) == np.sum(var_mask)+np.sum(uni_mask)
Image.fromarray(mask).save('restarts-mask.png')
m_exact = np.sum(mask)
mask_t = (torch.from_numpy(mask)).bool().cuda()
print('Image size (number of pixels):', N*N)
print('Number of measurements:', m_exact)
print('Target sample rate:', sample_rate)
print('Actual sample rate:', m_exact/(N*N))
### generate functions for measurement and weight operators
# Subsampled 2-D Fourier measurement operator, scaled for row normalisation.
A = lambda x, mode: n_op.fourier_2d(x,mode,N,mask_t,use_gpu=True)*(N/math.sqrt(m))
# compute maximum Haar wavelet resolution
nlevmax = 0
while N % 2**(nlevmax+1) == 0:
    nlevmax += 1
assert nlevmax > 0
W = lambda x, mode: n_op.tv_haar_2d(x,mode,N,lam,nlevmax)
L_W = math.sqrt(1+8*lam) # bound used as the analysis operator constant
### compute normalizing constant for orthonormal rows of A
e1 = (torch.arange(m_exact) == 0).float().cuda()
c_A = torch.linalg.norm(A(e1,0), 2)**2
c_A = c_A.cpu()
### reconstruct image using restarted NESTA for each eta value
# create variables that are only need to be created once
X_vec_t = torch.from_numpy(np.reshape(X,N*N))
norm_fro_X = np.linalg.norm(X,'fro')
print('Frobenius norm of X:', norm_fro_X)
inner_iters = math.ceil(2*L_W/(r*math.sqrt(N)*delta))
print('Inner iterations:', inner_iters)
# Geometrically decaying smoothing parameters, one per restart.
mu = []
eps = eps0
for k in range(outer_iters):
    mu.append(r*delta*eps)
    eps = r*eps + zeta
rel_errs_dict = dict()
for noise_level in eta:
    ### define the inverse problem
    # NOTE(review): the real part uses torch.randn (Gaussian) but the
    # imaginary part uses torch.rand (uniform) -- possibly a typo; confirm
    # the intended noise model.
    noise = torch.randn(m_exact) + 1j*torch.rand(m_exact)
    e = noise_level * noise / torch.linalg.norm(noise,2)
    y = A(X_vec_t,1) + e
    ### compute restarted NESTA solution
    z0 = torch.zeros(N*N,dtype=y.dtype)
    y = y.cuda()
    z0 = z0.cuda()
    _, iterates = n_nn.restarted_nesta_wqcbp(
        y, z0, A, W, c_A, L_W,
        inner_iters, outer_iters, noise_level, mu, True)
    ### extract restart values
    # Relative reconstruction error after each restart.
    final_its = [torch.reshape(its[-1],(N,N)) for its in iterates]
    rel_errs = list()
    for X_final in final_its:
        X_final = X_final.cpu().numpy()
        rel_errs.append(np.linalg.norm(X-X_final,'fro')/norm_fro_X)
    rel_errs_dict[noise_level] = rel_errs
### plots
# One semilog curve per noise level: relative error vs. restart number.
sns.set(context='paper', style='whitegrid')
for noise_level in eta:
    end_idx = len(rel_errs_dict[noise_level])+1
    plt.semilogy(
        range(1,end_idx),
        rel_errs_dict[noise_level],
        label='$\\eta = 10^{%d}$' % math.log10(noise_level),
        marker='*',
        linewidth=1)
plt.xlabel('Restart')
plt.ylabel('Relative error')
plt.legend(loc='lower left')
plt.savefig('restarts-plot.png', bbox_inches='tight', dpi=300)
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
import cvxpy.settings as s
from cvxpy.problems.solvers.ecos_intf import ECOS
import ecos
class ECOS_BB(ECOS):
    """An interface for the ECOS BB (branch-and-bound) solver.

    Extends the continuous ECOS interface with mixed-integer support by
    translating boolean/integer variable ids into column indices.
    """
    # Solver capabilities.
    LP_CAPABLE = True
    SOCP_CAPABLE = True
    SDP_CAPABLE = False
    EXP_CAPABLE = False
    MIP_CAPABLE = True

    def name(self):
        """The name of the solver.
        """
        return s.ECOS_BB

    @staticmethod
    def _noncvx_id_to_idx(dims, var_offsets, var_sizes):
        """Converts the nonconvex constraint variable ids in dims into indices.

        Parameters
        ----------
        dims : dict
            The dimensions of the cones.
        var_offsets : dict
            A dict of variable id to horizontal offset.
        var_sizes : dict
            A dict of variable id to variable dimensions.

        Returns
        -------
        tuple
            A list of indices for the boolean variables and integer variables.
        """
        bool_idx = []
        int_idx = []
        for indices, constr_type in zip([bool_idx, int_idx],
                                        [s.BOOL_IDS, s.INT_IDS]):
            # Expand each declared variable into one index per scalar entry.
            for var_id in dims[constr_type]:
                offset = var_offsets[var_id]
                size = var_sizes[var_id]
                for i in range(size[0]*size[1]):
                    indices.append(offset + i)
            # NOTE: mutates the caller's dims dict -- the id lists are
            # consumed here and removed before dims is handed to ecos.
            del dims[constr_type]
        return bool_idx, int_idx

    def get_problem_data(self, objective, constraints, cached_data):
        """Returns the argument for the call to the solver.

        Parameters
        ----------
        objective : LinOp
            The canonicalized objective.
        constraints : list
            The list of canonicalized constraints.
        cached_data : dict
            A map of solver name to cached problem data.

        Returns
        -------
        dict
            The arguments needed for the solver.
        """
        data = super(ECOS_BB, self).get_problem_data(objective, constraints,
                                                     cached_data)
        sym_data = self.get_sym_data(objective, constraints, cached_data)
        bool_idx, int_idx = self._noncvx_id_to_idx(data[s.DIMS],
                                                   sym_data.var_offsets,
                                                   sym_data.var_sizes)
        data[s.BOOL_IDX] = bool_idx
        data[s.INT_IDX] = int_idx
        return data

    def solve(self, objective, constraints, cached_data,
              warm_start, verbose, solver_opts):
        """Returns the result of the call to the solver.

        Parameters
        ----------
        objective : LinOp
            The canonicalized objective.
        constraints : list
            The list of canonicalized constraints.
        cached_data : dict
            A map of solver name to cached problem data.
        warm_start : bool
            Not used.
        verbose : bool
            Should the solver print output?
        solver_opts : dict
            Additional arguments for the solver.

        Returns
        -------
        tuple
            (status, optimal value, primal, equality dual, inequality dual)
        """
        data = self.get_problem_data(objective, constraints, cached_data)
        # Default verbose to false for BB wrapper.
        mi_verbose = solver_opts.get('mi_verbose', False)
        results_dict = ecos.solve(data[s.C], data[s.G], data[s.H],
                                  data[s.DIMS], data[s.A], data[s.B],
                                  verbose=verbose,
                                  mi_verbose=mi_verbose,
                                  bool_vars_idx=data[s.BOOL_IDX],
                                  int_vars_idx=data[s.INT_IDX],
                                  **solver_opts)
        return self.format_results(results_dict, None,
                                   data[s.OFFSET], cached_data)
|
import logging
import traceback
import requests
import urllib3
from gtin import GTIN, CheckDigitError
logger = logging.getLogger(__name__)
class ApiError(Exception):
    """Base class for errors raised by the GS1 API client."""
    pass
class ConnectionError(ApiError):
    """Raised when the GS1 API answers with an unexpected HTTP status.

    NOTE: intentionally shadows the builtin ConnectionError inside this module.
    """
    pass
def is_code_supported_by_gs1_api(code):
    """Return True when *code* is a GTIN this GS1 API can be queried for."""
    code = code.lstrip('0')
    # Reject codes whose GTIN check digit does not verify.
    try:
        GTIN(code)
    except CheckDigitError:
        return False
    # Prefixes excluded from lookup -- presumably ISBN/ISSN, restricted and
    # internal-use ranges the API does not serve; confirm against GS1 docs.
    unsupported_prefixes = ('190', '967', '977', '978', '979', '99', '150',
                            '169', '2', '922', '178', '161')
    return (code.isdigit()
            and len(code) in (8, 11, 12, 13, 14)
            and not code.startswith(unsupported_prefixes))
class Client:
    """Minimal HTTP client for the GS1 Poland "Produkty w sieci" product API."""

    PROD_HOST = 'https://www.produktywsieci.gs1.pl'

    def __init__(self, username, password, host=PROD_HOST):
        self.host = host
        self.session = requests.Session()
        self.username = username
        self.password = password

    def get_product_by_gtin(self, code):
        """Return the product JSON for *code*, or None on timeout / no match.

        :raises ConnectionError: when the API answers with a non-200 status.
        """
        gtin = Client._normalize_gtin(code)
        url = self.host + f"/api/products/{gtin}?aggregation=CREDIBLE"
        try:
            resp = self.session.get(url=url, auth=(self.username, self.password), timeout=10)
        except (requests.exceptions.Timeout, urllib3.exceptions.ReadTimeoutError):
            # Consistency fix: use the module logger (lazy %-args) rather
            # than print(), matching the logger already used below.
            logger.warning('Timeout while querying GS1: %s', traceback.format_exc())
            return None
        logger.info('GS1 resp:%s', resp.status_code)
        if resp.status_code != 200:
            # NOTE(review): resp.json() itself raises if the error body is
            # not JSON -- confirm the API always returns JSON errors.
            raise ConnectionError({'status_code': resp.status_code, 'code': code, 'json': resp.json()})
        payload = resp.json()
        # The API signals "not found" with a body lacking a GTIN field.
        if not payload.get('GTIN', None):
            return None
        return payload

    @staticmethod
    def _normalize_gtin(code):
        """Strip dashes and spaces from a user-supplied code."""
        return code.replace('-', '').replace(' ', '')
|
# Demonstrations of Python's arithmetic operators on ints and floats.
# Addition
print(10 + 5)
float1 = 13.65
float2 = 3.40
print(float1 + float2)
num = 20
flt = 10.5
# Mixing int and float promotes the result to float.
print(num + flt)
# Subtraction
print(10 - 5)
float1 = -18.678
float2 = 3.55
print(float1 - float2)
num = 20
flt = 10.5
print(num - flt)
# Multiplication
print(40 * 10)
float1 = 5.5
float2 = 4.5
print(float1 * float2)
print(10.2 * 3)
# Division
# True division (/) always yields a float, even for two ints.
print(40 / 10)
float1 = 5.5
float2 = 4.5
print(float1 / float2)
print(12.4 / 2)
# Floor Division
print(43 // 10)
float1 = 5.5
float2 = 4.5
print(5.5 // 4.5)
print(12.4 // 2)
# Modulo
print(10 % 2)
twenty_eight = 28
print(twenty_eight % 10)
print(-28 % 10) # The remainder is positive if the right-hand operand is positive
print(28 % -10) # The remainder is negative if the right-hand operand is negative
print(34.4 % 2.5) # The remainder can be a float
# Precedence
# An arithmetic expression containing different operators will be computed on the basis of operator precedence.
# Whenever operators have equal precedence, the expression is computed from the left side:
# Different precedence
print(10 - 3 * 2) # Multiplication computed first, followed by subtraction
# Same precedence
print(3 * 20 / 5) # Multiplication computed first, followed by division
print(3 / 20 * 5) # Division computed first, followed by multiplication
# Parentheses
print((10 - 3) * 2) # Subtraction occurs first
print((18 + 2) / (10 % 8))
|
import FWCore.ParameterSet.Config as cms
# File: BeamHaloSummary_cfi.py
# Original Author: R. Remington, The University of Florida
# Description: Module to build BeamHaloSummary Object and put into the event
# Date: Oct. 15, 2009
# Producer configuration: thresholds below define the Loose/Tight beam-halo
# identification working points for the ECAL and HCAL subdetectors.
BeamHaloSummary = cms.EDProducer("BeamHaloSummaryProducer",
    CSCHaloDataLabel = cms.InputTag("CSCHaloData"),
    EcalHaloDataLabel = cms.InputTag("EcalHaloData"),
    HcalHaloDataLabel = cms.InputTag("HcalHaloData"),
    GlobalHaloDataLabel = cms.InputTag("GlobalHaloData"),
    ## Ecal Loose Id
    l_EcalPhiWedgeEnergy = cms.double(10.),
    l_EcalPhiWedgeConstituents = cms.int32(6),
    l_EcalPhiWedgeToF = cms.double(-200.), ### needs to be tuned when absolute timing in EB/EE is understood w.r.t LHC
    l_EcalPhiWedgeConfidence = cms.double(.7),
    l_EcalShowerShapesRoundness = cms.double(.41),
    l_EcalShowerShapesAngle = cms.double(.51),
    l_EcalSuperClusterEnergy = cms.double(10.), # This will be Et
    l_EcalSuperClusterSize = cms.int32(3),
    ## Ecal Tight Id
    t_EcalPhiWedgeEnergy = cms.double(20.),
    t_EcalPhiWedgeConstituents = cms.int32(8),
    t_EcalPhiWedgeToF = cms.double(-200.), ### needs to be tuned when absolute timing in EB/EE is understood w.r.t LHC
    t_EcalPhiWedgeConfidence = cms.double(0.9),
    t_EcalShowerShapesRoundness = cms.double(.23),
    t_EcalShowerShapesAngle = cms.double(0.51),
    t_EcalSuperClusterEnergy = cms.double(10.), # This will be Et
    t_EcalSuperClusterSize = cms.int32(3),
    ## Hcal Loose Id
    l_HcalPhiWedgeEnergy = cms.double(20.),
    l_HcalPhiWedgeConstituents = cms.int32(6),
    l_HcalPhiWedgeToF = cms.double(-100.), ### needs to be tuned when absolute timing in HB/HE is understood w.r.t LHC
    l_HcalPhiWedgeConfidence = cms.double(0.7),
    ## Hcal Tight Id
    t_HcalPhiWedgeEnergy = cms.double(25.),
    t_HcalPhiWedgeConstituents = cms.int32(8),
    t_HcalPhiWedgeToF = cms.double(-100.), ### needs to be tuned when absolute timing in HB/HE is understood w.r.t LHC
    t_HcalPhiWedgeConfidence = cms.double(0.9),
    # strips of problematic cells in HCAL min cut
    problematicStripMinLength = cms.int32(6)
)
|
# https://leetcode.com/problems/edit-distance/
import sys
# import random
class Solution(object):
    """Levenshtein edit distance (LeetCode 72) via memoized recursion."""

    def _memoize(func):
        # Cache keyed on the (w1, w2) argument pair; `self` is deliberately
        # excluded so results are shared across instances.
        cache = {}
        def memoized(self, *key):
            if key not in cache:
                cache[key] = func(self, *key)
            return cache[key]
        return memoized

    @_memoize
    def minDistance(self, w1, w2):
        """Return the minimum number of insert/delete/replace edits
        turning w1 into w2."""
        n1, n2 = len(w1), len(w2)
        # Skip the longest common prefix; edits are only needed from the
        # first mismatching position onward.
        mismatch = None
        for idx in range(min(n1, n2)):
            if w1[idx] != w2[idx]:
                mismatch = idx
                break
        if mismatch is None:
            # One word is a prefix of the other (or they are equal):
            # only insertions/deletions of the length difference remain.
            return abs(n1 - n2)
        best = max(n1, n2)
        candidates = (
            (w1[mismatch:], w2[mismatch + 1:]),      # insert
            (w1[mismatch + 1:], w2[mismatch:]),      # delete
            (w1[mismatch + 1:], w2[mismatch + 1:]),  # replace
        )
        for left, right in candidates:
            best = min(best, 1 + self.minDistance(left, right))
            if best == 1:
                # No edit sequence through a mismatch can cost less than 1.
                break
        return best
if __name__ == '__main__':
    # Input format on stdin: first line is the number of test cases, then
    # each case is two lines holding the pair of words to compare.
    s = Solution()
    f = sys.stdin
    num_test_cases = int(f.readline())
    for i in range(num_test_cases):
        w1 = f.readline().rstrip()
        w2 = f.readline().rstrip()
        print(s.minDistance(w1, w2))
|
# Generated by Django 3.1.13 on 2021-09-20 12:48
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Pokemon.evolution to next_evolution (schema-only field rename)."""

    dependencies = [
        ('pokemon_entities', '0009_pokemon_evolution'),
    ]
    operations = [
        migrations.RenameField(
            model_name='pokemon',
            old_name='evolution',
            new_name='next_evolution',
        ),
    ]
|
import numpy as np
import matplotlib.pyplot as plt
import os, re, shutil
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import normalize as norm2
import tensorflow as tf
def get_minute_data(data):
    """Resample a time-indexed frame to one-minute frequency.

    Gaps are back-filled, i.e. each missing minute takes the next
    observed value.
    """
    minute_frequency = 'T'
    return data.asfreq(minute_frequency, method='bfill')
def read_data(filename, ticker, freq='raw'):
    """Load price data from ../../data/<filename>.

    :param filename: CSV file name inside the project data directory
    :param ticker: when truthy, keep only rows for this ticker symbol
    :param freq: 'm' resamples to minute bars and returns a numpy array;
                 anything else returns the raw time-indexed DataFrame
    """
    csv_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', '..', 'data', filename))
    frame = pd.read_csv(csv_path)
    if ticker:
        frame = frame[frame['ticker'] == ticker]
    frame['timestamp'] = pd.to_datetime(frame.timestamp)
    # 'volume' is intentionally dropped here while experimenting (kept in
    # the original as a commented-out alternative).
    frame = frame[['high', 'low', 'price', 'timestamp']].sort_values(by='timestamp')
    frame = frame.set_index('timestamp')
    if freq == 'm':
        frame = get_minute_data(frame).values
    return frame
def normalize(data):
    """Standardize each column (feature) to zero mean and unit variance.

    Statistics are taken along axis 0, i.e. one mean/variance per column
    computed over all rows.
    """
    col_mean = np.mean(data, axis=0, keepdims=True)
    col_std = np.sqrt(np.var(data, axis=0, dtype=np.float64, keepdims=True))
    return (data - col_mean) / col_std
def generate_datasets(data):
    """Build normalized (train, test) splits for next-step price prediction.

    X holds price/high/low/volume at time N; Y is the price at time N + 1.
    NOTE(review): this expects a 'volume' column, but read_data() currently
    drops volume "while experimenting" -- confirm the two stay in sync.
    """
    # price, high, low, volume at time N are the x-vars
    # price at time N + 1 is the y-var
    X = data[['price', 'high', 'low', 'volume']][0: -1]
    Y = data[['price']][1:]
    X = (X.values)
    Y = (Y.values)
    assert (X.shape[0] == Y.shape[0]) # number of samples match
    assert (X.shape[1] == 4)
    assert (Y.shape[1] == 1)
    X = normalize(X)
    Y = normalize(Y)
    # X = norm2(X, axis=0) # Currently disabled
    # Y = norm2(Y, axis=0) # # Currently disabled
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.33, random_state=42)
    # Due to the differences between Keras and the hand-coded forward/backward prop implementations
    # the orientation of the data is different. shape = (row, col), where row = # samples, col = # features
    # Therefore, transposition is not necessary
    # X_train = X_train.T
    # X_test = X_test.T
    # Y_train = Y_train.T
    # Y_test = Y_test.T
    return X_train, X_test, Y_train, Y_test
def evaluate_result(pred, x, y, mode):
    """Plot the first 100 predicted vs. true normalized prices.

    :param pred: predictions, squeezable to 1-D (drawn in red)
    :param x: model inputs (unused here; kept for call-site compatibility)
    :param y: ground-truth values, squeezable to 1-D (drawn in blue)
    :param mode: label prefix used in the plot title
    """
    plt.plot(np.squeeze(pred)[0:100], marker=None,
             color='red', markersize=1, linewidth=1)
    plt.plot(np.squeeze(y)[0:100], marker=None,
             color='blue', markersize=1, linewidth=1)
    plt.ylabel('normalized price')
    plt.xlabel('time step')
    plt.title(mode + " Predicted Prices")
    plt.legend(['predict', 'true'], loc='upper left')
    plt.show()
def plot_trades(EP, prices, actions, permitted_trades=None, name='', path=None):
    """Save a plot of agent buy/sell actions over the price series.

    :param EP: episode identifier used in the title and output filename
    :param prices: price series indexed by timestep
    :param actions: per-timestep action codes (0 = buy, 1 = sell, others ignored)
    :param permitted_trades: optional second action series drawn in a subplot
    :param name: filename prefix for the saved PNG
    :param path: output directory; defaults to logs/<latest run - 1>/test_trades
    """
    if permitted_trades is None:
        num_plots = 1
    else:
        num_plots = 2
    def get_buys_sells(actions):
        # Split action indices into buy (0) and sell (1) scatter coordinates.
        buys, sells = {}, {}
        buys['x'] = []
        buys['y'] = []
        sells['x'] = []
        sells['y'] = []
        for i, action in enumerate(actions):
            if action == 0:
                buys['x'].append(i)
                buys['y'].append(prices[i])
            elif action == 1:
                sells['x'].append(i)
                sells['y'].append(prices[i])
        return buys, sells
    plt.clf()
    plt.subplot(num_plots,1,1)
    plt.plot(prices, linewidth=1, color='#808080')
    buys, sells = get_buys_sells(actions)
    plt.plot(buys['x'], buys['y'], '.', markersize=2, color='g')
    plt.plot(sells['x'], sells['y'], '.', markersize=2, color='r')
    plt.ylabel('Prices')
    plt.xlabel('Timesteps')
    plt.title('Agent\'s Actions (EP: %s)' % EP)
    # Permitted Trades
    if permitted_trades is not None and len(permitted_trades) != 0:
        plt.subplot(2,1,2)
        plt.plot(prices, linewidth=1, color='#808080')
        p_buys, p_sells = get_buys_sells(permitted_trades)
        plt.plot(p_buys['x'], p_buys['y'], '.', markersize=2, color='g')
        plt.plot(p_sells['x'], p_sells['y'], '.', markersize=2, color='r')
        plt.ylabel('Prices')
        plt.xlabel('Timesteps')
        plt.title('Permitted Actions')
    if path == None:  # NOTE(review): prefer `path is None`
        path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'logs', str(get_latest_run_count() - 1), 'test_trades'))
    if os.path.isdir(path) is False:
        os.mkdir(path)
    plt.savefig(path + '/{0}EP{1}.png'.format(name, EP), dpi=400)
def plot_reward(rewards):
    """Redraw the live average-reward plot without blocking.

    draw() + a short pause() keeps the figure updating while training runs.
    """
    plt.clf()
    plt.plot(rewards)
    plt.ylabel('Avg Reward')
    plt.xlabel('Timesteps')
    plt.draw()
    plt.pause(0.001)
    # plt.show()
def plot_data(rewards, costs, q_vals, drawdown):
    """Redraw a 4-row live dashboard: rewards, drawdown, costs, Q variance.

    The rewards subplot overlays a horizontal line at the running mean.
    Non-blocking (draw + pause), so it can be called every step.
    """
    plt.clf()
    # Constant series at the mean reward, overlaid on the rewards curve.
    avg_rewards = np.empty(len(rewards))
    avg_rewards.fill(np.mean(rewards))
    avg_rewards = avg_rewards.tolist()
    plt.subplot(4,1,1)
    plt.plot(rewards)
    plt.plot(avg_rewards, color='yellow')
    plt.ylabel('Rewards')
    plt.xlabel('Timesteps')
    plt.subplot(4,1,2)
    plt.plot(drawdown)
    plt.ylabel('Drawdown')
    plt.xlabel('Timesteps')
    plt.subplot(4,1,3)
    plt.plot(costs)
    plt.ylabel('Costs')
    plt.xlabel('Timesteps')
    plt.subplot(4,1,4)
    plt.plot(q_vals)
    plt.ylabel('Q Value Variance')
    plt.xlabel('Timesteps')
    plt.draw()
    plt.pause(0.001)
def generateTimeSeriesBatches(data, input_size, num_steps):
    """Slice *data* into sliding windows of frames for sequence models.

    The series is first chunked into frames of `input_size` consecutive
    values (any remainder is discarded). Each X[i] then holds `num_steps`
    consecutive frames and y[i] is the frame that immediately follows.
    """
    frame_count = len(data) // input_size
    frames = [np.array(data[k * input_size:(k + 1) * input_size])
              for k in range(frame_count)]
    window_count = len(frames) - num_steps
    X = np.array([frames[k:k + num_steps] for k in range(window_count)])
    y = np.array([frames[k + num_steps] for k in range(window_count)])
    return X, y
def variable_summaries(var):
    """Build TF1 summary ops (scalar mean + histogram) for *var*.

    Returns the list of created summary ops so the caller can merge them.
    """
    out = []
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
    # Taken from https://www.tensorflow.org/guide/summaries_and_tensorboard
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        out.append(tf.summary.scalar(var.op.name + '_mean', mean))
        # with tf.name_scope('stddev'):
        #     stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        # out.append(tf.summary.scalar(var.op.name + '_stddev', stddev))
        # out.append(tf.summary.scalar(var.op.name + '_max', tf.reduce_max(var)))
        # out.append(tf.summary.scalar(var.op.name + '_min', tf.reduce_min(var)))
        out.append(tf.summary.histogram(var.op.name + '_histogram', var))
    return out
def log_histogram(writer, tag, values, step, bins=1000):
    """Logs the histogram of a list/vector of values.

    Builds a tf.HistogramProto by hand from a numpy histogram and writes it
    to `writer` under `tag` at global step `step`.
    """
    # Convert to a numpy array
    values = np.array(values)
    # Create histogram using numpy
    counts, bin_edges = np.histogram(values, bins=bins)
    # Fill fields of histogram proto
    hist = tf.HistogramProto()
    hist.min = float(np.min(values))
    hist.max = float(np.max(values))
    hist.num = int(np.prod(values.shape))
    hist.sum = float(np.sum(values))
    hist.sum_squares = float(np.sum(values**2))
    # Requires equal number as bins, where the first goes from -DBL_MAX to bin_edges[1]
    # See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30
    # Thus, we drop the start of the first bin
    bin_edges = bin_edges[1:]
    # Add bin edges and counts
    for edge in bin_edges:
        hist.bucket_limit.append(edge)
    for c in counts:
        hist.bucket.append(c)
    # Create and write Summary
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
    writer.add_summary(summary, step)
    writer.flush()
def log_scalars(writer, tag, values, step):
    """Write a single scalar value to the TF event file under `tag` at `step`."""
    scalar = tf.Summary.Value(tag=tag, simple_value=values)
    writer.add_summary(tf.Summary(value=[scalar]), step)
    writer.flush()
def cleanup_logs():
    """Delete stale TensorBoard event files from the logs/ directory and
    remove train_* run directories from the project root.

    NOTE: the patterns look like shell globs but are applied with re.search,
    exactly as in the surrounding code.
    """
    here = os.path.dirname(__file__)
    logs_dir = os.path.abspath(os.path.join(here, '..', '..', 'logs'))
    project_root = os.path.abspath(os.path.join(here, '..', '..'))
    for entry in os.listdir(logs_dir):
        if re.search('events.out.tfevents.*', entry):
            os.remove(os.path.join(logs_dir, entry))
    for entry in os.listdir(project_root):
        if re.search('train_*', entry):
            shutil.rmtree(os.path.join(project_root, entry), ignore_errors=True)
def get_latest_run_count(root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'logs'))):
    """Count the subdirectories of `root_path` (one directory per training run).

    NOTE(review): the default resolves to '../logs' while other helpers in this
    file use '../../logs' -- confirm which location is intended.
    """
    return sum(
        1 for entry in os.listdir(root_path)
        if os.path.isdir(os.path.join(root_path, entry))
    )
def update_target_graph(from_scope, to_scope):
    """Build assign ops copying every trainable variable in `from_scope`
    (the online DQN network) onto the matching variable in `to_scope`
    (the target network); variables are paired positionally."""
    source_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
    target_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
    return [target.assign(source) for source, target in zip(source_vars, target_vars)]
def test_trades(agent, i, plot_path, runs=1, plot_freq=10):
    """Evaluate `agent` for `runs` episodes, log the mean episode reward to
    TensorBoard, and plot the executed trades every `plot_freq`-th call.

    :param agent: trading agent exposing env, act(), summary_writer, isTrain
    :param i: global step index, used for logging and in the plot filename
    :param plot_path: directory the trade plot is written to
    :param runs: number of evaluation episodes to average over
    :param plot_freq: plot only when i is a multiple of this value
    """
    agent.isTrain = False  # evaluation mode -- presumably disables exploration in act(); confirm
    test_reward_list = []
    for run in range(int(runs)):
        state = agent.env.reset()
        reward_list = []
        actions = []
        prices = []
        done = False
        while done is False:
            prices.append(state[0][2])  # assumes state[0][2] is the current price -- TODO confirm
            action = agent.act(state)  # direct action for test
            state, reward, done, _ = agent.env.step(action)
            actions.append(action)
            reward_list.append(reward)
        test_reward_list.append(np.mean(reward_list))
    log_scalars(agent.summary_writer, 'Test Mean Reward', np.mean(test_reward_list), i)
    if i % plot_freq == 0:
        # only the prices/actions of the final run are plotted
        plot_trades(i, prices, actions, agent.env.permitted_trades, path=plot_path)
def copy_config(log_path, cfg_path, run_count):
    """Copy the config file at `cfg_path` into `<log_path>/<run_count>/config.ini`,
    creating the run directory first when it does not exist."""
    dest_dir = os.path.join(log_path, run_count)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    shutil.copyfile(cfg_path, os.path.join(dest_dir, 'config.ini'))
import os
import pytest
from ..conftest import read_json
from ..conftest import TESTING_CONFIG_DIR
test_config = read_json(
os.path.join(TESTING_CONFIG_DIR, "default/unit/test-config_robots-txt.json",)
)
class TestRobotsTxt:
    """HTTP-level checks for the site's /robots.txt, parametrized from the
    test-config JSON loaded at module scope."""

    @pytest.mark.v4_20
    @pytest.mark.parametrize(
        "test_input,expected", test_config["robots-txt"]["valid"]["input-expected"]
    )
    def test_valid(self, config, session, test_input, expected):
        """Test robots.txt."""
        # NOTE(review): `test_input` is accepted for parametrize symmetry but
        # never used in the body -- confirm whether it should shape the URL.
        # Arrange
        url = f"{config.BASE_URL}/robots.txt"
        # Act
        resp = session.get(url)
        # Assert: no redirect away from the canonical URL, 200 OK, and the
        # encoding/content-type declared in the config fixture
        assert resp.url == url
        assert resp.status_code == 200
        assert resp.encoding == expected["encoding"]
        assert resp.headers["Content-Type"] == expected["content-type"]
        # Cleanup
|
import os
from flask import send_from_directory
from api.app import create_app
settings_module = os.getenv("APP_SETTINGS_MODULE")
app = create_app(settings_module)
@app.route("/")
def serve():
return send_from_directory(app.static_folder, "index.html") |
#!/usr/bin/env python3
import os
import signal
import subprocess
import time
from threading import Timer
import atexit
import logging
import sys
timeout_sec = 30
logger = logging.getLogger('repeat_test')
hdlr = logging.FileHandler('./repeat_test.log', 'w')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
def kill_proc(proc, timeout):
    """Watchdog-timer callback: kill `proc`'s entire process group.

    Sets timeout["value"] = True so the main loop can tell "killed by the
    timer" (the run survived long enough = success) apart from an early exit.
    Also clears the global `p` so the atexit cleanup does not kill it twice.
    """
    timeout["value"] = True
    os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
    global p
    p = None
def cleanup():
    """atexit hook: terminate the demo subprocess group if one is still running."""
    global run
    run = False  # NOTE(review): `run` is never read elsewhere in this script -- looks vestigial
    if (p is not None):
        print('Stopping subprocess with pid: ', str(p.pid))
        os.killpg(os.getpgid(p.pid), signal.SIGTERM)
        print('Stopped!')
args=""
for arg in sys.argv[1:]:
args+="'"+arg+"' "
cmd = "python3 depthai-demo.py " + args
logger.info(cmd)
atexit.register(cleanup)
while True:
p = subprocess.Popen(cmd, shell=True, preexec_fn=os.setsid)
timeout = {"value": False}
timer = Timer(timeout_sec, kill_proc, [p, timeout])
timer.start()
p.wait()
timer.cancel()
return_code = p.returncode
p=None
if(timeout["value"]):
logger.info("returned succesfully")
else:
logger.info("returned with error code: " + str(return_code))
time.sleep(5)
|
from typing import (List,
Tuple)
from tests.utils import (RawPointsList,
RawPolygon,
enum_to_values)
from wagyu.bound import Bound as PortedBound
from wagyu.box import Box as PortedBox
from wagyu.edge import Edge as PortedEdge
from wagyu.enums import (EdgeSide as PortedEdgeSide,
FillKind as PortedFillKind,
OperationKind as PortedOperationKind,
PolygonKind as PortedPolygonKind)
from wagyu.intersect_node import IntersectNode as PortedIntersectNode
from wagyu.linear_ring import LinearRing as PortedLinearRing
from wagyu.local_minimum import (LocalMinimum as PortedLocalMinimum,
LocalMinimumList as PortedLocalMinimumList)
from wagyu.point import Point as PortedPoint
from wagyu.polygon import (Multipolygon as PortedMultipolygon,
Polygon as PortedPolygon)
from wagyu.ring import Ring as PortedRing
from wagyu.ring_manager import RingManager as PortedRingManager
from wagyu.wagyu import Wagyu as PortedWagyu
# Re-bind the imported names to themselves so they become module attributes of
# this helpers module -- presumably consumed by test/strategy modules via
# attribute access; confirm against the importing tests.
PortedBound = PortedBound
PortedBox = PortedBox
PortedEdge = PortedEdge
PortedEdgeSide = PortedEdgeSide
PortedFillKind = PortedFillKind
PortedIntersectNode = PortedIntersectNode
PortedLinearRing = PortedLinearRing
# alias: a ring tagged with the kind of polygon it belongs to
PortedLinearRingWithPolygonKind = Tuple[PortedLinearRing, PortedPolygonKind]
PortedLocalMinimum = PortedLocalMinimum
PortedLocalMinimumList = PortedLocalMinimumList
PortedMultipolygon = PortedMultipolygon
PortedOperationKind = PortedOperationKind
PortedPoint = PortedPoint
PortedPolygon = PortedPolygon
PortedPolygonKind = PortedPolygonKind
PortedRing = PortedRing
PortedRingManager = PortedRingManager
PortedWagyu = PortedWagyu
# materialized member lists of the ported enums, for parametrized tests
ported_edges_sides = enum_to_values(PortedEdgeSide)
ported_fill_kinds = enum_to_values(PortedFillKind)
ported_operation_kinds = enum_to_values(PortedOperationKind)
ported_polygon_kinds = enum_to_values(PortedPolygonKind)
def to_ported_linear_rings_points(raw_points: RawPointsList
                                  ) -> List[PortedPoint]:
    """Convert raw (x, y) pairs to ported points, closing the ring by
    appending a repeat of the first point."""
    converted = [PortedPoint(x, y) for x, y in raw_points]
    converted.append(converted[0])
    return converted
def to_ported_polygon_linear_rings(raw_polygon: RawPolygon
                                   ) -> List[PortedLinearRing]:
    """Build the polygon's linear rings: the border ring first,
    followed by one ring per hole."""
    raw_border, raw_holes = raw_polygon
    rings = [PortedLinearRing(to_ported_linear_rings_points(raw_border))]
    for raw_hole in raw_holes:
        rings.append(PortedLinearRing(to_ported_linear_rings_points(raw_hole)))
    return rings
def to_ported_local_minimum_list(linear_rings_with_polygon_kinds
                                 : List[PortedLinearRingWithPolygonKind]
                                 ) -> PortedLocalMinimumList:
    """Accumulate (linear ring, polygon kind) pairs into a local-minimum list."""
    minimums = PortedLocalMinimumList()
    for ring, kind in linear_rings_with_polygon_kinds:
        minimums.add_linear_ring(ring, kind)
    return minimums
|
#! /usr/bin/env python
#
#
# @file: meta
# @time: 2022/01/29
# @author: Mori
#
import bson
from schema import Schema
from moreover.base.logger import gen_logger
from typing import Dict, List, Tuple, Union
from motor import MotorClient, MotorDatabase, MotorCollection
from motor.core import (
AgnosticClient,
AgnosticDatabase,
AgnosticCollection,
AgnosticCursor,
)
from moreover.base.config import global_config, define
logger = gen_logger("orm")
CursorOrList = Union[List, AgnosticCursor]
define("MONGO_URI", default_value="MONGO_URI")
define("MONGO_DB", default_value="MONGO_DB")
class MotorMeta(type):
    """Metaclass binding a Motor (async MongoDB) collection to each ORM class.

    The collection name is the class name. The client and database handles are
    process-wide singletons stored on the metaclass; collection handles are
    cached per name. Public collection attributes (find_one, insert_one, ...)
    are copied onto the new class so subclasses can call them directly.
    """
    __db = None                 # cached database handle, shared by all classes
    __client = None             # cached MotorClient, shared by all classes
    __cached_collection = {}    # collection name -> collection handle

    @classmethod
    def get_client(cls) -> Union[MotorClient, AgnosticClient]:
        """Lazily create and cache the shared MotorClient from global config."""
        if not cls.__client:
            cls.__client = MotorClient(global_config.MONGO_URI)
        return cls.__client

    @classmethod
    def get_db(cls) -> Union[MotorDatabase, AgnosticDatabase]:
        """Lazily resolve and cache the configured database."""
        if not cls.__db:
            cls.__db = cls.get_client().get_database(global_config.MONGO_DB)
        return cls.__db

    @classmethod
    def read_collection(
        cls, collection_name: str
    ) -> Union[MotorCollection, AgnosticCollection]:
        """Return the (cached) collection handle for `collection_name`."""
        if collection_name not in cls.__cached_collection:
            cls.__cached_collection[collection_name] = cls.get_db().get_collection(
                collection_name
            )
        return cls.__cached_collection[collection_name]

    def __new__(cls, name, bases, attrs):
        # Create the class, then attach the shared client/db and its collection.
        new_cls = type.__new__(cls, name, bases, attrs)
        setattr(new_cls, "client", cls.get_client())
        setattr(new_cls, "db", cls.get_db())
        target_collection = cls.read_collection(name)
        setattr(new_cls, "collection", target_collection)
        # Delegate non-dunder collection attributes onto the class, unless the
        # metaclass itself defines an attribute of the same name.
        for method_or_attr in filter(
            lambda x: not x.startswith("__") and not hasattr(cls, x),
            dir(target_collection),
        ):
            setattr(new_cls, method_or_attr, getattr(target_collection, method_or_attr))
        # Validate the declarative attributes: bad types fail hard, empty
        # schema/indexes only warn.
        schema = getattr(new_cls, "schema")
        indexs = getattr(new_cls, "indexs")
        if schema is not None and not isinstance(schema, Schema):
            raise ValueError(f"{name} orm class: schema is not schema.Schema")
        if schema is None:
            logger.warning(f"{name} orm class schema is empty")
        if not isinstance(indexs, list):
            raise ValueError(f"{name} orm class: indexs is not list")
        if len(indexs) == 0:
            logger.warning(f"{name} orm class indexs is empty")
        logger.info(f"{name} orm class inited")
        return new_cls
class Collection(object, metaclass=MotorMeta):
    """Base ODM class: each subclass maps onto the MongoDB collection named
    after the subclass (see MotorMeta). Wraps a raw document dict and,
    when `schema` is set, validates it on construction and on update.
    """
    db: MotorDatabase
    client: MotorClient
    collection: MotorCollection
    schema: Schema = None  # optional schema.Schema used to validate documents
    indexs = []  # index specs; only type/emptiness is checked by the metaclass

    def __init__(self, document: Dict) -> None:
        super(Collection, self).__init__()
        self.__document = document
        if self.schema:
            self.__document = self.schema.validate(document)

    @property
    def document(self):
        """The underlying (validated) document dict."""
        return self.__document

    def update_document(self, **kwargs):
        """Merge `kwargs` into the document, re-validating when a schema is set."""
        if self.schema:
            self.__document = self.schema.validate({**self.__document, **kwargs})
        else:
            self.__document.update(kwargs)

    async def save(self):
        """Upsert: replace when the document already carries an _id,
        otherwise insert it and record the generated _id.

        :return: the saved document dict
        """
        if "_id" in self.__document and self.__document["_id"] is not None:
            await self.collection.replace_one(
                {"_id": self.__document["_id"]}, self.__document
            )
        else:
            insert_result = await self.collection.insert_one(self.__document)
            self.__document.update({"_id": insert_result.inserted_id})
        return self.document

    @classmethod
    async def get(
        cls,
        filter: Dict = None,
        projection: Dict = None,
        sort: List[Tuple[str, int]] = None,
        limit: int = None,
        offset: int = None,
        with_count: bool = False,
        return_cursor: bool = False,
    ) -> Union[Tuple[CursorOrList, int], Tuple[object, int]]:
        """Query the collection.

        :return: (single instance, count) when limit == 1;
                 (cursor, count) when return_cursor is True;
                 otherwise (list of instances, count).
                 count is None unless with_count is True.
        """
        # `query` is expected to be delegated from the Motor collection by
        # MotorMeta -- confirm it exists on the underlying collection.
        cursor = cls.query(
            filter=filter, projection=projection, sort=sort, limit=limit, offset=offset
        )
        count = None
        if with_count:
            count = await cls.count_documents(filter)
        if limit == 1:
            # BUG FIX: the original built this value but never returned it,
            # falling through to the generic list branch instead.
            items = [cls(item) async for item in cursor]
            # NOTE: raises IndexError when no document matches, mirroring the
            # original expression's semantics.
            return items[0], count
        if return_cursor:
            return cursor, count
        else:
            data = [cls(item) async for item in cursor]
            return data, count

    @classmethod
    async def get_one(cls, id_or_filter: Union[bson.ObjectId, Dict]):
        """Fetch a single document by ObjectId or by filter dict and wrap it."""
        if isinstance(id_or_filter, bson.ObjectId):
            document = await cls.find_one({"_id": id_or_filter})
        else:
            document = await cls.find_one(id_or_filter)
        return cls(document)
|
def sql_format(sql, replaceDict):
    """Return `sql` with every key of `replaceDict` replaced by its value
    (keys and values are both coerced to str before replacement)."""
    for placeholder, value in replaceDict.items():
        sql = sql.replace(str(placeholder), str(value))
    return sql
def getBookConnect():
    """Open a new pymysql connection to the book database (credentials here
    are placeholders).

    NOTE(review): `pymysql` is imported further down this script, after the
    first call site -- confirm import ordering before running top-to-bottom.
    """
    return pymysql.connect(host="xxx.com", user="xxx", password="xxx", database="xxx", charset='utf8')
import openpyxl
def read_isbn(file, colnames, sheet="Sheet1", line_num=10):
    """Open workbook `file` and print the ISBNs found in each column of
    `colnames` on worksheet `sheet`, `line_num` per output line."""
    worksheet = openpyxl.load_workbook(file)[sheet]
    for column in colnames:
        printISBN(column, worksheet, line_num)
def printISBN(colname, ws, line_num=10):
    """Print the ISBNs of worksheet column `colname`, comma-separated with a
    newline every `line_num` values; dashes are stripped from each ISBN."""
    print(f"\n\n-- Current Colnum:{colname}")
    cells = ws[colname]
    total = len(cells)
    for position, cell in enumerate(cells, start=1):
        isbn = str(cell.value).replace("-", "")
        if position == total:
            print(f"{isbn}", )
        else:
            print(f"{isbn},", end="")
        if position % line_num == 0 and position != total:
            print("")
# ----------------------------------------------
sql_temp_update = '''
UPDATE book_keyword bk
SET is_delete = 1
WHERE bk.book_id = {bookID} AND bk.is_delete = 0 AND classify_id <> 0;\n
'''
sql_temp_insert = '''
INSERT INTO `book_keyword` (`keyword_id`, `book_group_id`, `book_id`, `channel_id`, `classify_id`, `rank`, `set_type`, `is_delete`, `create_user`, `create_time`, `update_user`, `update_time`, `is_warehouse`, `warehouse_id`, `is_edit`)(
SELECT `keyword_id`, `book_group_id`, `book_id`, `channel_id`, {classifyID} `classify_id`, `rank`, `set_type`, `is_delete`, `create_user`, `create_time`, `update_user`, `update_time`, `is_warehouse`, `warehouse_id`, `is_edit`
FROM
book_keyword bk
WHERE
bk.book_id = {bookID} AND bk.is_delete = 0 AND classify_id = 0
);\n
'''
bookIds = [2966350, 2966351, 2984439, 4120189, 4534243, 4535159, 4550713, 4551599, 4551655, 4782047,
4782146, 4782235, 4782686, 4786228, 4786768, 4792270, 4792271, 4910500, 4910828, 4910844,
4911121, 4911552, 4911584, 4911664, 4911704, 4911709, 4911715, 4912281, 4912282, 4912755,
4912833, 4912834, 4912839, 4912840, 4912842, 4912843, 4912844, 4912846, 4912848, 4912859,
4912860, 4912861, 4912862, 4912863, 4912864, 4912865, 4912866, 4912867, 4912869, 4912870,
4912931, 4912958, 4912971, 4912972, 4912973, 4913072, 4913843, 4913903, 4915161, 4956845,
4994546, 4994830, 4994905, 5003814, 5011313, 5011427, 5011432]
sql_get_classify_id = '''
SELECT DISTINCT classify_id FROM book_keyword bk
WHERE bk.book_id = {bookID} and classify_id <> 0;
'''
def get_classify_ids(conn, bookId):
    """Fetch the distinct non-zero classify_ids attached to `bookId`'s keywords."""
    query = sql_format(sql_get_classify_id, {"{bookID}": bookId})
    cursor = conn.cursor()
    cursor.execute(query)
    rows = cursor.fetchall()
    cursor.close()
    return [row[0] for row in rows]
with open("update.sql", "w") as f:
for bookId in bookIds:
f.write(sql_format(sql_temp_update, {"{bookID}": bookId}))
with open("insert.sql", "w") as f, getBookConnect() as conn:
for bookId in bookIds:
classify_ids = get_classify_ids(conn, bookId)
for classify_id in classify_ids:
f.write(sql_format(sql_temp_insert, {"{bookID}": bookId, "{classifyID}": classify_id}))
# ----------------------------------------------
import math
import pymysql
import datetime
from openpyxl import Workbook
def calc_entroy(name: str):
    """Entropy-like score of the character distribution of `name`.

    The character '群' is ignored. Returns
    sum(count_c * ln(count_c)) / total_count over the remaining characters;
    higher values mean more repeated characters.
    """
    counts = {}
    for ch in name:
        if ch == '群':
            continue
        counts[ch] = counts.get(ch, 0) + 1
    total = sum(counts.values())
    weighted = 0
    for count in counts.values():
        weighted += count * math.log(count)
    return weighted / total
stop_word = ["的", "地", "我"]
def calc_base(name: str):
# 1. 包含三个数字, 字母
# 2. 包含 社群
# 3. 包含任意相同的三个字符 例如 心心心心心心心心心心1群
# 4. 包含停止词和其他无意义词汇 的 地 我
chars = {}
c_alpha_and_num = 0
if "社群" in name:
return "group"
for c in name:
if 'a' < c < 'z' or 'A' < c < 'Z' or '0' < c < '9':
c_alpha_and_num = c_alpha_and_num + 1
if c != '群':
if c not in chars:
chars[c] = 1
else:
chars[c] = chars[c] + 1
if c in stop_word:
return "stop"
if c_alpha_and_num >= 4:
return "alpha"
for key in chars:
if chars[key] >= 3:
return "multi"
return None
columns = ['id', 'classify_id', 'group_name', 'weixin_qrcode_id',
'weixin_group_id', 'user_number', 'create_time']
sql = f"SELECT {', '.join(columns)} FROM book_group_qrcode WHERE user_number < 16 AND create_time < '2019-11-23';"
def insertHead(sheet):
    """Write the header row: one cell per entry of `columns`, followed by the
    '熵值' (entropy) and '属性' (attribute) columns.

    BUG FIX: the original wrote the two extra headers inside the loop at
    offsets i+2 / i+3, overwriting later column headers on every iteration;
    the final sheet only came out right by accident of iteration order.
    They are now written once, after the data columns.
    """
    for i, name in enumerate(columns):
        sheet.cell(row=1, column=i + 1, value=name)
    last = len(columns)
    sheet.cell(row=1, column=last + 1, value='熵值')
    sheet.cell(row=1, column=last + 2, value='属性')
# Build the analysis workbook: one row per low-membership group, annotated
# with the entropy score and the heuristic classification of its name.
wb = Workbook()
sheet = wb.create_sheet("群名称熵值分析", index=0)
# NOTE(review): pymysql's Connection context manager yields a Connection in
# pymysql >= 1.0 (a Cursor in older releases); .execute() is called on it
# directly here, so this relies on the old behaviour -- confirm the pinned
# version.
with getBookConnect() as cursor:
    cursor.execute(sql)
    resultSet = cursor.fetchall()
insertHead(sheet)
for i, item in enumerate(resultSet, start=2):
    # after this loop `col` is the last data-column index, so col + 2 and
    # col + 3 address the entropy and attribute columns
    for col, name in enumerate(columns):
        sheet.cell(row=i, column=col + 1, value=item[col])
    sheet.cell(row=i, column=col + 2, value=calc_entroy(item[2]))
    sheet.cell(row=i, column=col + 3, value=calc_base(item[2]))
wb.save(f"群名称熵值分析({datetime.datetime.now().strftime('%Y-%m-%d-%H')}).xlsx")
|
# Copyright 2020 InterDigital Communications, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def rename_key(key):
    """Rename state_dict key.

    ResidualBlockWithStride renamed its 'downsample' branch to 'skip';
    only the .bias/.weight entries of that branch are rewritten.
    """
    is_skip_param = any(
        marker in key for marker in (".downsample.bias", ".downsample.weight")
    )
    if is_skip_param:
        return key.replace("downsample", "skip")
    return key
def load_pretrained(state_dict):
    """Return a copy of `state_dict` with legacy keys renamed (see rename_key)."""
    converted = {}
    for key, value in state_dict.items():
        converted[rename_key(key)] = value
    return converted
|
import sys
from htchirp import condor_chirp
def main():
    """CLI entry point: forward argv (minus the program name) to condor_chirp.

    The second argument is passed as True -- presumably a CLI/print-output
    flag in htchirp; confirm against the htchirp API.
    """
    return condor_chirp(sys.argv[1:], True)
|
from keras import backend as K
from keras.layers import Layer
from keras.initializers import Ones, Zeros
class LayerNormalization(Layer):
    """Layer normalization over the last (feature) axis.

    Output: gamma * (x - mean) / (std + eps) + beta, with mean/std computed
    per instance over the last dimension.

    NOTE(review): eps is added to the std directly rather than inside a
    sqrt(var + eps) as in the original paper's formulation -- confirm this
    is intended before matching against reference implementations.
    """
    def __init__(self, eps=1e-6, **kwargs):
        # small constant guarding against division by zero for
        # (near-)constant feature vectors
        self._eps = eps
        self._gamma = None  # learned per-feature scale, created in build()
        self._beta = None   # learned per-feature offset, created in build()
        super().__init__(**kwargs)

    def build(self, input_shape):
        # one gamma/beta weight per feature (last input dimension)
        self._gamma = self.add_weight(name='gamma', shape=input_shape[-1:], initializer=Ones(), trainable=True)
        self._beta = self.add_weight(name='beta', shape=input_shape[-1:], initializer=Zeros(), trainable=True)
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        # In layer normalization, instances are handled independent of each other.
        # Normalize on the feature (last) dimension for each instance.
        # Not handling masks for now..
        mean = K.mean(inputs, axis=-1, keepdims=True)
        std = K.std(inputs, axis=-1, keepdims=True)
        return self._gamma * (inputs - mean) / (std + self._eps) + self._beta

    def compute_output_shape(self, input_shape):
        # normalization is shape-preserving
        return input_shape
|
from pomdpy.discrete_pomdp import DiscreteObservation
from .util import observation_to_index, index_to_observation
class TrafficLightObservation(DiscreteObservation):
    """Discrete observation of a traffic light: (wavelength, distance, speed),
    mapped to a bin index via observation_to_index."""

    def __init__(self, measurements):
        super().__init__(observation_to_index(measurements))
        self.wavelength_observed = measurements[0]
        self.distance_observed = measurements[1]
        self.speed = measurements[2]

    def copy(self):
        """Return a fresh observation carrying the same measurements."""
        measurements = (self.wavelength_observed, self.distance_observed, self.speed)
        return TrafficLightObservation(measurements)

    def equals(self, other_observation):
        """True iff all three measured components match."""
        return (self.wavelength_observed == other_observation.wavelength_observed
                and self.distance_observed == other_observation.distance_observed
                and self.speed == other_observation.speed)

    def distance_to(self, other_observation):
        """0/1 metric: identical observations have distance 0, all others 1."""
        return 0 if self.equals(other_observation) else 1

    def __hash__(self):
        return int(self.bin_number)

    def print_observation(self):
        print(self.to_string())

    def to_string(self):
        return "[{}-wavelength, {} units from car moving at {} units/sec]".format(
            self.wavelength_observed, self.distance_observed, self.speed)
|
import logging
from datetime import datetime
from django.db.models import Q
from django.shortcuts import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.exceptions import MethodNotAllowed
from rest_framework.response import Response
from rest_framework_json_api.views import ReadOnlyModelViewSet
from core.data_viewmodels import (
NodeTimeseriesListViewModel,
NodeTimeseriesViewModel,
InstallationTimeseriesListViewModel,
InstallationTimeseriesViewModel,
)
from core.models import Node, RoomNodeInstallation
from core.serializers import (
NodeTimeseriesListSerializer,
NodeTimeseriesSerializer,
InstallationTimeseriesListSerializer,
InstallationTimeSeriesSerializer,
)
logger = logging.getLogger(__name__)
class NodeTimeSeriesViewSet(ReadOnlyModelViewSet):
    """Read-only access to per-node sample time series.

    ``list`` returns one summary row per node the authenticated user may see;
    ``retrieve`` returns the samples of one node, optionally sliced by the
    ``filter[from]`` / ``filter[to]`` query parameters (unix seconds).
    """
    permission_classes = [IsAuthenticated]
    queryset = Node.objects.all()
    # Use different serializers for different actions.
    # See https://stackoverflow.com/questions/22616973/django-rest-framework-use-different-serializers-in-the-same-modelviewset
    serializer_classes = {
        "list": NodeTimeseriesListSerializer,
        "retrieve": NodeTimeseriesSerializer,
    }
    serializer_class = NodeTimeseriesListSerializer  # fallback

    def get_queryset(self, *args, **kwargs):
        """Return accessible nodes (list action) or one Node (retrieve action).

        NOTE(review): for ``retrieve`` this returns a model instance rather
        than a queryset -- ``get_object`` below depends on that. Also,
        ``raise MethodNotAllowed`` is raised without the required ``method``
        argument -- confirm it instantiates as intended.
        """
        queryset = super().get_queryset()
        # Restrict to samples from nodes commanded by the currently authenticated user.
        authorized_nodes = queryset.filter(owner__users=self.request.user)
        if self.action == "retrieve":
            # Restrict the samples to a node, if one is given.
            if "node_pk" in self.kwargs:
                # If the view is accessed via the `node-samples-list` route, it will
                # have been passed the node_pk as lookup_field.
                node_id = self.kwargs["node_pk"]
            elif "pk" in self.kwargs:
                # If the view is accessed via the `timeseries-details` route, it will
                # have been passed the pk as lookup_field.
                node_id = self.kwargs["pk"]
            else:
                raise MethodNotAllowed
            logger.debug("Retrieve time series for individual node %s", node_id)
            return get_object_or_404(authorized_nodes, pk=node_id)
        elif self.action == "list":
            logger.debug("Retrieve time series overview of all accessible nodes.")
            return authorized_nodes
        else:
            raise MethodNotAllowed

    def get_serializer_class(self):
        """Pick the serializer matching the current action (list/retrieve)."""
        # TODO: Does not work for related fields because of this upstream bug:
        # https://github.com/django-json-api/django-rest-framework-json-api/issues/859
        return self.serializer_classes.get(self.action, self.serializer_class)

    def list(self, request):
        """Return one summary view-model per accessible node (pk, alias, sample count)."""
        queryset = self.get_queryset()
        ts_info = [
            NodeTimeseriesListViewModel(
                pk=node.pk, node_alias=node.alias, sample_count=node.samples.count()
            )
            for node in queryset
        ]
        serializer_class = self.get_serializer_class()
        serializer = serializer_class(ts_info, many=True, context={"request": request})
        return Response(serializer.data)

    def get_object(self):
        """Build the detail view-model: the node's samples within the requested
        time slice (defaults: from 0 to now)."""
        node = self.get_queryset()
        queryset = node.samples
        from_limit = int(self.request.query_params.get("filter[from]", 0))
        to_limit = int(
            self.request.query_params.get(
                "filter[to]", round(datetime.now().timestamp())
            )
        )
        logger.debug(
            "Limiting the time series to the time slice from %s to %s",
            from_limit,
            to_limit,
        )
        samples = queryset.filter(
            timestamp_s__gte=from_limit, timestamp_s__lte=to_limit
        )
        return NodeTimeseriesViewModel(
            pk=node.pk,
            node_alias=node.alias,
            from_timestamp_s=from_limit,
            to_timestamp_s=to_limit,
            samples=samples,
        )
class InstallationTimeSeriesViewSet(ReadOnlyModelViewSet):
    """A view for time series accessed by installation. Each installation has its own time series, constrained by the time slice of the installation."""

    # No permission_classes: anonymous users get public installations only
    # (see get_queryset).
    queryset = RoomNodeInstallation.objects.all()
    # Use different serializers for different actions.
    # See https://stackoverflow.com/questions/22616973/django-rest-framework-use-different-serializers-in-the-same-modelviewset
    serializer_classes = {
        "list": InstallationTimeseriesListSerializer,
        "retrieve": InstallationTimeSeriesSerializer,
    }
    serializer_class = InstallationTimeseriesListSerializer  # fallback

    def get_queryset(self, *args, **kwargs):
        """Return accessible installations (list) or one installation (retrieve).

        Anonymous requests see public installations only; authenticated
        requests additionally see installations of nodes they command.
        NOTE(review): ``raise MethodNotAllowed`` lacks the required ``method``
        argument -- confirm it instantiates as intended.
        """
        queryset = super().get_queryset()
        is_public = Q(is_public=True)
        accessible_if_authenticated = Q(node__owner__users=self.request.user)
        if not self.request.user.is_authenticated:
            # Public API access. Restrict the listed installations to those that are
            # publicly visible.
            authorized_installations = queryset.filter(is_public)
        else:
            # Otherwise, Restrict to samples from installations commanded by the
            # currently logged-in user.
            authorized_installations = queryset.filter(
                is_public | accessible_if_authenticated
            )
        if self.action == "retrieve":
            # Restrict the samples to a specific installation, if one is given.
            if "installation_pk" in self.kwargs:
                # If the view is accessed via the `installations-samples-list` route,
                # it will have been passed the installation_pk as lookup_field.
                installation_id = self.kwargs["installation_pk"]
            elif "pk" in self.kwargs:
                # If the view is accessed via the `installation-timeseries-details`
                # route, it will have been passed the pk as lookup_field.
                installation_id = self.kwargs["pk"]
            else:
                raise MethodNotAllowed
            logger.debug(
                "Retrieve time series for individual node installation %s",
                installation_id,
            )
            return get_object_or_404(authorized_installations, pk=installation_id)
        elif self.action == "list":
            logger.debug(
                "Retrieve time series overview of all accessible node installations."
            )
            return authorized_installations
        else:
            raise MethodNotAllowed

    def get_serializer_class(self):
        """Pick the serializer matching the current action (list/retrieve)."""
        # TODO: Does not work for related fields because of this upstream bug:
        # https://github.com/django-json-api/django-rest-framework-json-api/issues/859
        return self.serializer_classes.get(self.action, self.serializer_class)

    def list(self, request):
        """Return one summary view-model per accessible installation, with the
        sample count restricted to the installation's own time slice."""
        queryset = self.get_queryset()
        ts_info = [
            InstallationTimeseriesListViewModel(
                pk=installation.pk,
                node_id=installation.node.id,
                node_alias=installation.node.alias,
                from_timestamp_s=installation.from_timestamp_s,
                to_timestamp_s=installation.to_timestamp_s,
                sample_count=installation.node.samples.filter(
                    timestamp_s__gte=installation.from_timestamp_s,
                    timestamp_s__lte=installation.to_timestamp_s,
                ).count(),
            )
            for installation in queryset
        ]
        serializer_class = self.get_serializer_class()
        serializer = serializer_class(ts_info, many=True, context={"request": request})
        return Response(serializer.data)

    def get_object(self):
        """Build the detail view-model: samples within the intersection of the
        installation's own time slice and the requested filter slice."""
        installation = self.get_queryset()
        queryset = installation.node.samples.all()
        # Limit time-slice to node installation and query.
        install_from_s = installation.from_timestamp_s
        install_to_s = installation.to_timestamp_s
        filter_from_s = int(self.request.query_params.get("filter[from]", 0))
        filter_to_s = int(
            self.request.query_params.get(
                "filter[to]", round(datetime.now().timestamp())
            )
        )
        # Intersect the two ranges: the later start, the earlier end.
        from_max_s = max(install_from_s, filter_from_s)
        to_min_s = min(install_to_s, filter_to_s)
        logger.debug(
            "Limiting the time series to the time slice from %s to %s",
            from_max_s,
            to_min_s,
        )
        samples = queryset.filter(
            timestamp_s__gte=from_max_s, timestamp_s__lte=to_min_s
        ).distinct()
        return InstallationTimeseriesViewModel(
            pk=installation.pk,
            node_id=installation.node.id,
            node_alias=installation.node.alias,
            from_timestamp_s=from_max_s,
            to_timestamp_s=to_min_s,
            samples=samples,
        )
|
"""
Deletes records for participants who are missing from the person table.
Developed to assist drop_participants_without_ppi_or_ehr.py in removing records
"""
# Python imports
# Third party imports
# Project imports
import common
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
from constants.cdr_cleaner import clean_cdr as cdr_consts
# OMOP tables without a person_id column -- never targeted for PID deletion.
NON_PID_TABLES = [
    common.CARE_SITE, common.LOCATION, common.FACT_RELATIONSHIP, common.PROVIDER
]
# All person_id-bearing tables except person itself.
TABLES_TO_DELETE_FROM = set(common.AOU_REQUIRED +
                            [common.OBSERVATION_PERIOD]) - set(NON_PID_TABLES +
                                                               [common.PERSON])
# DDL prefix; concatenated with RECORDS_FOR_NON_EXISTING_PIDS (as its
# `query_type`) to form the full sandbox CREATE ... AS SELECT statement.
SELECT_QUERY = common.JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project}}.{{sandbox_dataset}}.{{sandbox_table}}` AS
SELECT *
""")
# Delete rows in tables where the person_id is not in the person table
# (`query_type` is either "DELETE" or the SELECT_QUERY DDL above).
RECORDS_FOR_NON_EXISTING_PIDS = common.JINJA_ENV.from_string("""
{{query_type}}
FROM `{{project}}.{{dataset}}.{{table}}`
WHERE person_id NOT IN
(SELECT person_id
FROM `{{project}}.{{dataset}}.person`)
""")
ISSUE_NUMBERS = ["DC584", "DC706"]
class DropMissingParticipants(BaseCleaningRule):
    """
    Drops participant data for pids missing from the person table
    """

    def __init__(self, issue_numbers, description, affected_datasets,
                 affected_tables, project_id, dataset_id, sandbox_dataset_id,
                 namer):
        # NOTE(review): `affected_tables` is accepted but ignored -- the rule
        # always operates on TABLES_TO_DELETE_FROM; confirm that is intended.
        desc = f'Sandbox and remove rows for PIDs missing from the person table.'
        super().__init__(
            # merge caller-supplied issue numbers with this rule's own
            issue_numbers=list(set(ISSUE_NUMBERS) | set(issue_numbers)),
            description=f'{description} AND {desc}',
            affected_datasets=affected_datasets,
            affected_tables=list(TABLES_TO_DELETE_FROM),
            project_id=project_id,
            dataset_id=dataset_id,
            sandbox_dataset_id=sandbox_dataset_id,
            table_namer=namer)

    def get_query_specs(self):
        """
        Return a list of queries to remove data for missing persons.

        Removes data from person_id linked tables for any persons which do not
        exist in the person table. For each table two queries are emitted:
        first a CREATE-AS-SELECT that sandboxes the doomed rows, then the
        DELETE itself (order matters -- the sandbox must run first).

        :return: A list of string queries that can be executed to delete data from
        other tables for non-person users.
        """
        query_list = []
        for table in self.affected_tables:
            # DDL head reused as the `query_type` of the shared row filter
            create_sandbox_ddl = SELECT_QUERY.render(
                project=self.project_id,
                sandbox_dataset=self.sandbox_dataset_id,
                sandbox_table=self.sandbox_table_for(table))
            sandbox_query = RECORDS_FOR_NON_EXISTING_PIDS.render(
                query_type=create_sandbox_ddl,
                project=self.project_id,
                dataset=self.dataset_id,
                table=table)
            query_list.append({cdr_consts.QUERY: sandbox_query})
            delete_query = RECORDS_FOR_NON_EXISTING_PIDS.render(
                query_type="DELETE",
                project=self.project_id,
                dataset=self.dataset_id,
                table=table)
            query_list.append({cdr_consts.QUERY: delete_query})
        return query_list

    def sandbox_table_for(self, affected_table):
        """Sandbox table name: joined lowercase issue numbers + table name."""
        issue_numbers_str = '_'.join(
            [issue_num.lower() for issue_num in self.issue_numbers])
        return f'{issue_numbers_str}_{affected_table}'

    def get_sandbox_tablenames(self):
        """All sandbox table names this rule may create."""
        return [
            self.sandbox_table_for(affected_table)
            for affected_table in self.affected_tables
        ]

    def setup_rule(self, client, *args, **keyword_args):
        # no setup required for this rule
        pass

    def setup_validation(self, client, *args, **keyword_args):
        # validation not implemented
        pass

    def validate_rule(self, client, *args, **keyword_args):
        # validation not implemented
        pass
|
from sample.player import Player
class Checker:
    """Thin wrapper around Player deciding whether to play or reset a wav file."""

    def __init__(self):
        self.temp = Player()

    def remainder(self, file):
        """After 17:00 (player time) play `file` and report whether it played;
        otherwise reset the file."""
        if self.temp.getTime() <= 17:
            return self.temp.resetWav(file)
        self.temp.playWavFile(file)
        return self.temp.wavWasPlayed(file)
|
import ast
from collections import defaultdict
from functools import reduce
from typing import ClassVar, DefaultDict, List, Mapping, Set, Type
from typing_extensions import Final, final
from wemake_python_styleguide.compat.aliases import ForNodes
from wemake_python_styleguide.logic import source, walk
from wemake_python_styleguide.logic.nodes import get_parent
from wemake_python_styleguide.logic.tree import ifs, keywords, operators
from wemake_python_styleguide.logic.tree.compares import CompareBounds
from wemake_python_styleguide.logic.tree.functions import given_function_called
from wemake_python_styleguide.types import AnyIf, AnyLoop, AnyNodes
from wemake_python_styleguide.violations.best_practices import (
SameElementsInConditionViolation,
)
from wemake_python_styleguide.violations.consistency import (
ImplicitComplexCompareViolation,
MultilineConditionsViolation,
)
from wemake_python_styleguide.violations.refactoring import (
ImplicitInConditionViolation,
NegatedConditionsViolation,
SimplifiableReturningIfViolation,
UnmergedIsinstanceCallsViolation,
UselessLenCompareViolation,
UselessReturningElseViolation,
)
from wemake_python_styleguide.visitors.base import BaseNodeVisitor
from wemake_python_styleguide.visitors.decorators import alias
_OperatorPairs = Mapping[Type[ast.boolop], Type[ast.cmpop]]
_ELSE_NODES: Final = (*ForNodes, ast.While, ast.Try)
# TODO: move to logic
def _duplicated_isinstance_call(node: ast.BoolOp) -> List[str]:
    """Return source names of objects passed to ``isinstance`` more than once
    inside the given boolean operation."""
    usage_count: DefaultDict[str, int] = defaultdict(int)
    for candidate in node.values:
        if not isinstance(candidate, ast.Call) or len(candidate.args) != 2:
            continue
        if not given_function_called(candidate, {'isinstance'}):
            continue
        checked_object = source.node_to_string(candidate.args[0])
        usage_count[checked_object] += 1
    return [name for name, times in usage_count.items() if times > 1]
# TODO: move to logic
def _get_duplicate_names(variables: List[Set[str]]) -> Set[str]:
return reduce(
lambda acc, element: acc.intersection(element),
variables,
)
@final
@alias('visit_any_if', (
    'visit_If',
    'visit_IfExp',
))
class IfStatementVisitor(BaseNodeVisitor):
    """Checks single and consecutive ``if`` statement nodes."""

    def visit_any_if(self, node: AnyIf) -> None:
        """Checks ``if`` nodes and expressions."""
        self._check_negated_conditions(node)
        self._check_useless_len(node)
        if isinstance(node, ast.If):
            # These two checks only make sense for `if` *statements*,
            # not for ternary `if` expressions.
            self._check_multiline_conditions(node)
            self._check_simplifiable_returning_if(node)
        self.generic_visit(node)

    def _check_negated_conditions(self, node: AnyIf) -> None:
        # Only report negated tests when an `else` exists, since swapping
        # the branches is then a trivial fix. Ternary expressions
        # (`ast.IfExp`) always have an `else` part, so they always qualify.
        if isinstance(node, ast.If) and not ifs.has_else(node):
            return
        if isinstance(node.test, ast.UnaryOp):
            if isinstance(node.test.op, ast.Not):
                self.add_violation(NegatedConditionsViolation(node))
        elif isinstance(node.test, ast.Compare):
            # Any `!=` inside a (possibly chained) comparison counts.
            if any(isinstance(elem, ast.NotEq) for elem in node.test.ops):
                self.add_violation(NegatedConditionsViolation(node))

    def _check_useless_len(self, node: AnyIf) -> None:
        # `if len(x):` is equivalent to `if x:` -- the call is redundant.
        if isinstance(node.test, ast.Call):
            if given_function_called(node.test, {'len'}):
                self.add_violation(UselessLenCompareViolation(node))

    def _check_multiline_conditions(self, node: ast.If) -> None:
        """Checks multiline conditions ``if`` statement nodes."""
        # NOTE(review): assumes compiled `If` nodes always carry `lineno`;
        # a None here would make the comparison below raise TypeError.
        start_lineno = getattr(node, 'lineno', None)
        for sub_nodes in ast.walk(node.test):
            sub_lineno = getattr(sub_nodes, 'lineno', None)
            # Any sub-node of the test on a later line means the condition
            # spans multiple lines; one violation per `if` is enough.
            if sub_lineno is not None and sub_lineno > start_lineno:
                self.add_violation(MultilineConditionsViolation(node))
                break

    def _check_simplifiable_returning_if(self, node: ast.If) -> None:
        # `if cond: return True / else: return False` (or the `else`-less
        # sibling form) can be simplified to `return cond`.
        body = node.body
        simple_if_and_root = not (ifs.has_elif(node) or ifs.is_elif(node))
        if keywords.is_simple_return(body) and simple_if_and_root:
            if ifs.has_else(node):
                else_body = node.orelse
                if keywords.is_simple_return(else_body):
                    self.add_violation(SimplifiableReturningIfViolation(node))
                return
            self._check_simplifiable_returning_parent(node)

    def _check_simplifiable_returning_parent(self, node: ast.If) -> None:
        # `else`-less form: inspect the statement following this `if` in
        # its parent to see whether it immediately returns a bool.
        parent = get_parent(node)
        if isinstance(parent, _ELSE_NODES):
            # Loops and `try` keep trailing statements in `orelse` too.
            body = parent.body + parent.orelse
        else:
            body = getattr(parent, 'body', [node])
        next_index_in_parent = body.index(node) + 1
        if keywords.next_node_returns_bool(body, next_index_in_parent):
            self.add_violation(SimplifiableReturningIfViolation(node))
@final
@alias('visit_any_loop', (
    'visit_For',
    'visit_AsyncFor',
    'visit_While',
))
class UselessElseVisitor(BaseNodeVisitor):
    """Ensures that ``else`` is used correctly for different nodes."""

    #: Nodes that break or return the execution flow.
    _returning_nodes: ClassVar[AnyNodes] = (
        ast.Break,
        ast.Raise,
        ast.Return,
        ast.Continue,
    )

    def __init__(self, *args, **kwargs) -> None:
        """We need to store visited ``if`` not to duplicate violations."""
        super().__init__(*args, **kwargs)
        self._visited_ifs: Set[ast.If] = set()

    def visit_If(self, node: ast.If) -> None:
        """Checks ``if`` statements."""
        self._check_useless_if_else(node)
        self.generic_visit(node)

    def visit_Try(self, node: ast.Try) -> None:
        """Checks exception handling."""
        self._check_useless_try_else(node)
        self.generic_visit(node)

    def visit_any_loop(self, node: AnyLoop) -> None:
        """Checks any loops."""
        self._check_useless_loop_else(node)
        self.generic_visit(node)

    def _check_useless_if_else(self, node: ast.If) -> None:
        # NOTE(review): `ifs.chain` appears to yield each `ast.If` of an
        # `if`/`elif` chain and, last, the terminal `else` body as a plain
        # list of statements -- confirm in `logic.tree.ifs`.
        real_ifs = []
        for chained_if in ifs.chain(node):
            if isinstance(chained_if, ast.If):
                if chained_if in self._visited_ifs:
                    # This chain was already reported from an earlier node.
                    return
                self._visited_ifs.update({chained_if})
                real_ifs.append(chained_if)
                continue
            # Here `chained_if` is the `else` body (list of statements).
            previous_has_returns = all(
                ifs.has_nodes(self._returning_nodes, real_if.body)
                for real_if in real_ifs
            )
            current_has_returns = ifs.has_nodes(
                self._returning_nodes, chained_if,
            )
            # Every branch leaves the scope, so `else` can be dedented.
            if previous_has_returns and current_has_returns:
                self.add_violation(
                    UselessReturningElseViolation(chained_if[0]),
                )

    def _check_useless_try_else(self, node: ast.Try) -> None:
        if not node.orelse or node.finalbody:
            # `finally` cancels this rule.
            # Because refactoring `try` with `else` and `finally`
            # by moving `else` body after `finally` will change
            # the execution order.
            return
        all_except_returning = all(
            walk.is_contained(except_, self._returning_nodes)
            for except_ in node.handlers
        )
        else_returning = any(
            walk.is_contained(sub, self._returning_nodes)
            for sub in node.orelse
        )
        # Every handler leaves the scope and `else` returns too, so the
        # `else` body can simply follow the `try` statement.
        if all_except_returning and else_returning:
            self.add_violation(UselessReturningElseViolation(node))

    def _check_useless_loop_else(self, node: AnyLoop) -> None:
        if not node.orelse:
            return
        # An else statement makes sense if we
        # want to execute something after breaking
        # out of the loop without writing more code
        has_break = any(
            walk.is_contained(sub, ast.Break)
            for sub in node.body
        )
        if has_break:
            return
        # `[1:]` skips `ast.Break`: a breaking body was handled above.
        body_returning = any(
            walk.is_contained(sub, self._returning_nodes[1:])
            for sub in node.body
        )
        else_returning = any(
            walk.is_contained(sub, self._returning_nodes)
            for sub in node.orelse
        )
        if body_returning and else_returning:
            self.add_violation(UselessReturningElseViolation(node))
@final
class BooleanConditionVisitor(BaseNodeVisitor):
    """Ensures that boolean conditions are correct."""

    def __init__(self, *args, **kwargs) -> None:
        """We need to store some bool nodes not to visit them twice."""
        super().__init__(*args, **kwargs)
        #: `BoolOp` nodes already consumed as operands of an outer one.
        self._same_nodes: List[ast.BoolOp] = []
        #: NOTE(review): appears unused in this module -- confirm elsewhere.
        self._isinstance_calls: List[ast.BoolOp] = []

    def visit_BoolOp(self, node: ast.BoolOp) -> None:
        """Checks that ``and`` and ``or`` conditions are correct."""
        self._check_same_elements(node)
        self._check_isinstance_calls(node)
        self.generic_visit(node)

    def _get_all_names(
        self,
        node: ast.BoolOp,
    ) -> List[str]:
        """Flatten a (possibly nested) ``BoolOp`` into operand source strings."""
        # We need to make sure that we do not visit
        # one chained `BoolOp` elements twice:
        self._same_nodes.append(node)
        names = []
        for operand in node.values:
            if isinstance(operand, ast.BoolOp):
                names.extend(self._get_all_names(operand))
            else:
                # Unary wrappers are stripped, so `x or not x` also
                # compares equal on the rendered name `x`.
                names.append(
                    source.node_to_string(
                        operators.unwrap_unary_node(operand),
                    ),
                )
        return names

    def _check_same_elements(self, node: ast.BoolOp) -> None:
        if node in self._same_nodes:
            return  # We do not visit nested `BoolOp`s twice.
        operands = self._get_all_names(node)
        # Any repeated operand text means a duplicated condition element.
        if len(set(operands)) != len(operands):
            self.add_violation(SameElementsInConditionViolation(node))

    def _check_isinstance_calls(self, node: ast.BoolOp) -> None:
        # Only `isinstance(x, A) or isinstance(x, B)` is mergeable into
        # `isinstance(x, (A, B))`; `and` chains are not.
        if not isinstance(node.op, ast.Or):
            return
        for var_name in _duplicated_isinstance_call(node):
            self.add_violation(
                UnmergedIsinstanceCallsViolation(node, text=var_name),
            )
@final
class ImplicitBoolPatternsVisitor(BaseNodeVisitor):
    """Is used to find implicit patterns that are formed by boolops."""

    #: `x != a and x != b` hides `not in`; `x == a or x == b` hides `in`.
    _allowed: ClassVar[_OperatorPairs] = {
        ast.And: ast.NotEq,
        ast.Or: ast.Eq,
    }

    def visit_BoolOp(self, node: ast.BoolOp) -> None:
        """Checks ``and`` and ``or`` don't form implicit anti-patterns."""
        self._check_implicit_in(node)
        self._check_implicit_complex_compare(node)
        self.generic_visit(node)

    def _check_implicit_in(self, node: ast.BoolOp) -> None:
        variables: List[Set[str]] = []
        for cmp in node.values:
            # Every operand must be a single comparison whose operator
            # matches the boolop (`==` for `or`, `!=` for `and`);
            # otherwise the chain cannot be rewritten with `in`.
            if not isinstance(cmp, ast.Compare) or len(cmp.ops) != 1:
                return
            if not isinstance(cmp.ops[0], self._allowed[node.op.__class__]):
                return
            variables.append({source.node_to_string(cmp.left)})
        # A name compared in *every* operand can become `name in (...)`.
        for duplicate in _get_duplicate_names(variables):
            self.add_violation(
                ImplicitInConditionViolation(node, text=duplicate),
            )

    def _check_implicit_complex_compare(self, node: ast.BoolOp) -> None:
        if not isinstance(node.op, ast.And):
            return
        # `x > a and x < b` style chains should be merged into `a < x < b`.
        if not CompareBounds(node).is_valid():
            self.add_violation(ImplicitComplexCompareViolation(node))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" This file contains code that can scrape the Nation Weather Service (NWS) website and read the
river level data for both Markland and McAlpine dams. By using the mileage marker for Bushman's Lake
the level of the river at that point can be calculated.
"""
from NWS_River_Data_scrape_NEW import processRiverData
def test_processRiverData():
    """processRiverData() should return a non-empty dict of river data."""
    river_data = processRiverData()
    # isinstance is the idiomatic type check (also accepts dict subclasses);
    # `type(x) == dict` was both non-idiomatic and needlessly strict.
    assert isinstance(river_data, dict)
    assert len(river_data) > 0
from hypothesis import given
import hypothesis.strategies as hst
def decode(s):
    """Identity decoder placeholder: returns its input unchanged."""
    return s


def encode(s):
    """Identity encoder placeholder: returns its input unchanged."""
    return s
@given(hst.text())
def test_decode_inverts_encode(s):
    """Property: decoding an encoded string yields the original string."""
    assert decode(encode(s)) == s
@given(hst.lists(hst.text()))
def test_ISO_datestring(l):
    """Stub property test; no assertions yet.

    NOTE(review): this test body is empty -- presumably a placeholder for
    ISO date-string parsing checks. Fill in or remove.
    """
    return
# examples from pytest book
from collections import namedtuple

# `defaults=` (Python 3.7+) fills trailing fields, replacing the legacy
# mutation of `Task.__new__.__defaults__` with a declarative one-liner.
Task = namedtuple(
    "Task", ["summary", "owner", "done", "id"], defaults=(None, None, False, None)
)
def test_defaults():
    """A no-arg Task equals one built from the explicit default values."""
    assert Task() == Task(None, None, False, None)
def test_member_access():
    """Named fields are reachable as attributes."""
    task = Task("buy milk", "brian")
    assert (task.summary, task.owner) == ("buy milk", "brian")
    assert (task.done, task.id) == (False, None)
def test_asdict():
    """_asdict() mirrors the namedtuple as a field->value mapping."""
    as_dict = Task("do something", "okken", True, 21)._asdict()
    assert as_dict == {
        "summary": "do something",
        "owner": "okken",
        "done": True,
        "id": 21,
    }
def test_replace():
    """_replace() returns a copy with the given fields overridden."""
    original = Task("finish book", "brian", False)
    updated = original._replace(id=10, done=True)
    assert updated == Task("finish book", "brian", True, 10)
|
# Import Python libraries
from dash import Dash, html
import dash_bootstrap_components as dbc
# Create a Dash application, pass in a stylesheet from Bootstrap
app = Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])

# Create the layout of the app: two rows (75%/25% height), two columns each.
# NOTE: Dash passes `style` dicts to React inline styles, which require
# camelCase keys ("backgroundColor"); hyphenated CSS names such as
# "background-color" are silently ignored, so the colors never rendered.
app.layout = dbc.Container(
    [
        # Row 1
        dbc.Row(
            [
                dbc.Col(
                    [html.Div("Div 1")],
                    style={"backgroundColor": "blue"},
                ),
                dbc.Col([html.Div("Div 2")]),
            ],
            style={"backgroundColor": "green"},
            className="h-75",
        ),
        # Row 2
        dbc.Row(
            [
                dbc.Col(
                    [html.Div("Div 3")],
                    style={"backgroundColor": "pink"},
                ),
                dbc.Col([html.Div("Div 4")]),
            ],
            style={"backgroundColor": "yellow"},
            className="h-25",
        ),
    ],
    style={"height": "100vh"},
)

# Run the app only when executed directly, not on import.
if __name__ == "__main__":
    app.run_server()
import contextlib
import json
from hashfs import HashFS
from pydantic.dataclasses import dataclass
from typing import List, Type, TypeVar, Generic, Generator
from .registerable import Registerable, Serializable
T = TypeVar('T', bound=Registerable)
class ArbitraryTypeConfig:
    """Pydantic config enabling non-pydantic field types (e.g. ``HashFS``)."""

    arbitrary_types_allowed = True
@dataclass(config=ArbitraryTypeConfig)
class Registry(Generic[T]):
    """Loads registered objects from a content-addressed ``HashFS`` store.

    ``classes`` is the closed set of deserializable types; an object is
    matched to its class via the ``'class'`` field of the stored JSON,
    compared against each class's pydantic schema title.
    """

    fs: HashFS
    classes: List[Type['T']]

    def load(self, hash_str: str) -> T:
        """Deserialize the object stored under ``hash_str``.

        Raises:
            ValueError: if the document lacks the ``class``/``value`` keys
                ("not a serialized object") or names an unknown class.
        """
        with self.fs.open(hash_str) as f:
            contents = json.load(f)
        # Narrow try scopes: previously a missing 'value' key was caught by
        # the class-lookup handler and mis-reported as "Not a recognized
        # class" instead of "Not a serialized object".
        try:
            class_title = contents['class']
            value = contents['value']
        except KeyError:
            raise ValueError(f'Not a serialized object: {hash_str}')
        internal_registry = {cls.schema()['title']: cls for cls in self.classes}
        try:
            klass = internal_registry[class_title]
        except KeyError:
            known_classes = ','.join(internal_registry.keys())
            raise ValueError(f'Not a recognized class: {class_title} ({known_classes})')
        return klass(**value)
#: Deserialized payload type produced by a ``Serializable`` wrapper.
DeserializedBase = TypeVar('DeserializedBase')


class SerializableRegistry(Generic[DeserializedBase], Registry[Serializable[DeserializedBase]]):
    """Registry whose entries unpack into usable objects via a context manager."""

    @contextlib.contextmanager
    def open(self, hash_str: str) -> Generator[DeserializedBase, None, None]:
        """Load ``hash_str``, yield its unpacked object, and always close it.

        The ``finally`` clause guarantees ``close`` runs even if the
        caller's ``with`` body raises while using the object.
        """
        serialized = self.load(hash_str)
        deserialized = serialized.unpack(self.fs)
        try:
            yield deserialized
        finally:
            serialized.close(deserialized)
|
import compiler
import build
import networkx as nx
from matplotlib import pyplot as plt
def main():
    """Drive a demo build: one executable linked against one dynamic library."""
    c = compiler.Compiler('i686-linux-gnu-g++-6')
    b = build.Build('output', c)
    exe = b.add_executable('myexe', ['myexe/main.cpp', 'myexe/other.cpp'])
    lib = b.add_dynamic_library('mylib', ['mylib/mylib.cpp'])
    exe.link_libraries([lib])
    b.build('output/targets/myexe')


# Guard the entry point: the previous bare `main()` call ran a full build
# as a side effect of merely importing this module.
if __name__ == '__main__':
    main()
|
import os
import sys
sys.path.append("../data")
import math
import torch
from torch import nn
from torch import optim
from tokenizer import ScriptTokenizer
import model
from logger import Logger
'''
ASSUMPTIONS:
1. len(pre_frag) == len(frag) == len(post_frag)
'''
# Prefer GPU when available; all tensors below are moved to this device.
device = "cuda" if torch.cuda.is_available() else "cpu"


def get_test_set(chunk_num, test_size):
    """Load the first ``test_size`` (pre, frag, post) tensors of chunk ``chunk_num``.

    NOTE(review): relies on the module-global ``data_dir`` assigned inside
    the ``__main__`` block below -- calling this from an importing module
    would raise ``NameError``. Consider passing ``data_dir`` explicitly.
    """
    test_pres = torch.load(os.path.join(data_dir, "pres", f"pre{chunk_num}.pt")).to(device)[:test_size]
    test_frags = torch.load(os.path.join(data_dir, "frags", f"frag{chunk_num}.pt")).to(device)[:test_size]
    test_posts = torch.load(os.path.join(data_dir, "posts", f"post{chunk_num}.pt")).to(device)[:test_size]
    return test_pres, test_frags, test_posts
if __name__ == "__main__":
    # TODO: also make this nice with argparse
    if len(sys.argv) != 2:
        print("Usage: python train.py data_dir")
        # Exit on bad usage: previously execution fell through and crashed
        # with IndexError on sys.argv[1] right after printing the message.
        sys.exit(1)
    data_dir = sys.argv[1]
    data_chunks = len(os.listdir(os.path.join(data_dir, "frags")))
    logger = Logger("train_loss", "test_loss")

    VOCAB_SIZE = 30526
    BATCH_SIZE = 10
    EPOCHS = 20
    SEQ_LEN = 10
    TEST_SET_SIZE = 100

    # TODO: make better testing
    # Reserve the last chunk as a held-out test set.
    test_chunk = data_chunks - 1
    data_chunks -= 1
    test_pres, test_frags, test_posts = get_test_set(test_chunk, TEST_SET_SIZE)
    # Contrastive-style target: row i should match column i.
    test_ground_truth = torch.arange(len(test_pres)).to(device)

    mod = model.CSFP(800, VOCAB_SIZE, SEQ_LEN, 500, 10, 10)
    # NOTE(review): data tensors are moved to `device` but `mod.to(device)`
    # is never called -- confirm CSFP places itself on the right device.
    opt = optim.Adam(mod.parameters(), lr=3e-4, betas=(0.9, 0.98), eps=1e-6, weight_decay=0.0)
    loss_context = torch.nn.CrossEntropyLoss()
    loss_fragment = torch.nn.CrossEntropyLoss()

    for e in range(EPOCHS):
        print(f"Epoch {e}...")
        for chunk in range(data_chunks):
            encoded_pres = torch.load(os.path.join(data_dir, "pres", f"pre{chunk}.pt")).to(device)
            encoded_frags = torch.load(os.path.join(data_dir, "frags", f"frag{chunk}.pt")).to(device)
            encoded_posts = torch.load(os.path.join(data_dir, "posts", f"post{chunk}.pt")).to(device)
            batch_count = math.ceil(len(encoded_pres) / BATCH_SIZE)
            for batch in range(batch_count):
                b_pre = encoded_pres[batch * BATCH_SIZE : (batch + 1) * BATCH_SIZE]
                b_frag = encoded_frags[batch * BATCH_SIZE : (batch + 1) * BATCH_SIZE]
                b_post = encoded_posts[batch * BATCH_SIZE : (batch + 1) * BATCH_SIZE]
                opt.zero_grad()
                lpc, lpf = mod(b_pre, b_frag, b_post)
                ground_truth = torch.arange(len(b_pre)).to(device)  # len(b_pre): last batch may be smaller
                loss = (loss_context(lpc, ground_truth) + loss_fragment(lpf, ground_truth)) / 2
                logger.log(loss.item(), "train_loss")
                loss.backward()
                opt.step()
            # Evaluate on the held-out chunk after each training chunk.
            with torch.no_grad():
                test_lpc, test_lpf = mod(test_pres, test_frags, test_posts)
                test_loss = (loss_context(test_lpc, test_ground_truth) + loss_fragment(test_lpf, test_ground_truth)) / 2
                logger.log(test_loss.item(), "test_loss")
|
import json
from text_filters import *
from classifier import *
from objects import *
def lexical_diversity(article_content):
    """Ratio of total characters to distinct characters in the joined text.

    NOTE(review): despite the name, this measures *character* diversity of
    the space-joined paragraphs, not word-level lexical diversity --
    confirm this is intentional before relying on it.
    """
    joined = " ".join(article_content)
    return len(joined) / len(set(joined))
def print_dict_content(dictionary, message=None):
    """Print an optional header, then each key/value pair, one per line."""
    # `is not None` is the idiomatic None test (PEP 8); `!= None` invokes
    # __eq__ and can misbehave with custom equality.
    if message is not None:
        print(message)
    for key, value in dictionary.items():
        print(" ", key, "=>", value)
# Change it on objects
# NOTE(review): `const_hierarchy` comes from the star-import of `objects`;
# presumably the legal-code section hierarchy -- confirm its shape there.
hierarchy = const_hierarchy
def zeros_maker(int_id, str_len=3):
    """Return ``int_id`` as a string left-padded with zeros to ``str_len``.

    Values whose string form is already longer than ``str_len`` are
    returned unpadded, matching the original concatenation behavior
    (``str.rjust`` never truncates).
    """
    return str(int_id).rjust(str_len, '0')


def index_hierarchy_id(code_id, headers_dict):
    """Build a flat index string: ``code_id`` + zero-padded section counters.

    ``headers_dict`` maps each hierarchy level (``book``, ``part``,
    ``headline``, ``chapter``, ``section``, ``article``) to a dict with a
    ``'count'`` entry. The level order below is significant -- it defines
    the lexicographic sort order of the resulting index. Article counters
    get 4 digits; all other levels get 2.
    """
    widths = (
        ('book', 2),
        ('part', 2),
        ('headline', 2),
        ('chapter', 2),
        ('section', 2),
        ('article', 4),
    )
    return code_id + ''.join(
        zeros_maker(headers_dict[level]['count'], width)
        for level, width in widths
    )
def add_to_log(log_info, request_response, post_payload):
    """Append one Elasticsearch POST result to the parallel log lists.

    ``log_info`` holds parallel lists under ``id``/``status``/``error``/
    ``message`` (each ``None`` before the first call). Returns the updated
    ``log_info`` for chaining.
    """
    response_dict = json.loads(request_response.text)
    # On success, Elasticsearch returns an info document with no
    # statusCode in the body, so a missing key means HTTP 200.
    # (Narrowed from a bare `except:` that hid real errors.)
    try:
        status = response_dict['statusCode']
    except KeyError:
        status = 200
    success_message = 'Success loading the article'
    # First call: initialize the parallel lists, then append uniformly
    # (the old code duplicated the branching for first vs. later calls).
    if log_info['id'] is None:
        log_info['id'] = []
        log_info['status'] = []
        log_info['error'] = []
        log_info['message'] = []
    log_info['id'].append(post_payload['id'])
    log_info['status'].append(status)
    if status != 200:
        log_info['error'].append(response_dict['error'])
        log_info['message'].append(response_dict['message'])
    else:
        log_info['error'].append(' ')
        log_info['message'].append(success_message)
    return log_info
def print_article(art_number, article):
    """Pretty-print one article record: id, headline, chapter, and body."""
    # "lexical_diversity: ", article['article']['lexical_diversity'],"\n",
    headline = article['headline']
    chapter = article['chapter']
    body = article['article']
    print(f'Article No {art_number+1}: \n',
          "id: ", article['id'], "\n",
          headline['title'], headline['name'], "\n",
          chapter['title'], chapter['name'], "\n",
          body['name'], body['content'])
    print('\n--------------------------------------\n')
def split_text_in_lines(text, delimiter):
    """Split each paragraph of ``text`` on ``delimiter``, keeping the delimiter.

    Returns a new list where every occurrence of ``delimiter`` ends a line.
    Empty fragments are dropped. Returns ``None`` (after printing a
    message) when ``delimiter`` is ``None``.

    Fixes two defects of the original: the delimiter was wrongly appended
    to the final fragment even when the paragraph did not end with it, and
    a useless ``text = new_text`` rebinding ran inside the loop.
    """
    if delimiter is None:
        print('Missing delimiter value')
        return
    new_text = []
    for paragraph in text:
        if delimiter not in paragraph:
            new_text.append(paragraph)
            continue
        pieces = paragraph.split(delimiter)
        # All pieces but the last were followed by the delimiter; only
        # re-append it to those.
        for piece in pieces[:-1]:
            if piece:
                new_text.append(piece + delimiter)
        if pieces[-1]:
            new_text.append(pieces[-1])
    return new_text
from __future__ import absolute_import, print_function
import json
import os
from builtins import object
import responses
from pypexels import PyPexels
from pypexels.src.settings import API_ROOT, API_VERSION
api_key = os.environ.get('API_KEY', None) or 'API_KEY'


class TestPopular(object):
    """Replays a stored /popular HTTP response against PyPexels via `responses`."""

    # TODO: avoid code duplication
    # Need to workout how to combine responses.activate so as to avoid
    # code duplication, as the testcases are pretty much the same for all
    root_path = os.environ.get('TRAVIS_BUILD_DIR', None) or os.getcwd()
    store_mapping = {
        'popular': os.sep.join([
            root_path,
            'tests',
            'resources',
            'resource__popular_per_page_5_page_2.json',
        ]),
    }

    @responses.activate
    def test_popular(self):
        """Mock the /popular endpoint and exercise page accessors and entries."""
        index = 'popular'
        resource_filepath = self.store_mapping[index]
        # Context manager closes the file handle; the previous bare
        # `open(...).read()` leaked it. json.load reads the stream directly.
        with open(resource_filepath) as resource_file:
            stored_response = json.load(resource_file)
        responses.add(
            responses.GET,
            # _url contains only the short path like /popular?page=2&per_page=5
            '{}/{}{}'.format(API_ROOT, API_VERSION, stored_response.get('_url')),
            json=stored_response.get('body'),
            status=stored_response.get('status_code'),
            content_type='application/json',
            adding_headers=stored_response.get('headers'),
            match_querystring=True,
        )
        py_pexels = PyPexels(api_key=api_key)
        popular_results_page = py_pexels.popular(page=2, per_page=5)
        # Page properties
        print(popular_results_page.page)
        print(popular_results_page.per_page)
        print(popular_results_page.has_next)
        print(popular_results_page.has_previous)
        print(popular_results_page.link_self)
        print(popular_results_page.link_first)
        print(popular_results_page.link_last)
        print(popular_results_page.link_next)
        print(popular_results_page.link_previous)
        # Entries
        for photo in popular_results_page.entries:
            print(photo.id, photo.photographer, photo.width, photo.height, photo.url)
            print(photo.src)
|
from ex115.lib.interface import *
from ex115.lib.arquivo import *
from time import sleep
# Registration data file; created on first run if it does not exist yet.
arq = 'cursoemvideo.txt'
if not arquivoExiste(arq):
    criarArquivo(arq)

# Main menu loop: list registered people, register a new person, or exit.
while True:
    resposta = menu(['ver pessoas cadastradas', 'Cadastrar pessoa', 'Sair Do Sistema'])
    if resposta == 1:
        # Option 1: list the contents of the registration file.
        lerArquivo(arq)
    elif resposta == 2:
        # Option 2: register a new person (name + age) into the file.
        cabeçalho('NOVO CADASTRO')
        nome = str(input('nome: '))
        idade = leiaInt('idade: ')
        cadastrar(arq, nome, idade)
    elif resposta == 3:
        # Option 3: say goodbye and leave the loop.
        cabeçalho('saindo do sistema... até logo')
        break
    else:
        # Invalid choice: show an error (ANSI red) and prompt again.
        print('\033[31mERRO! digite uma opção válida\033[m')
        sleep(0.8)
import os
import numpy as np
from spirl.models.closed_loop_spirl_mdl import GoalClSPiRLMdl
from spirl.models.skill_prior_mdl import SkillSpaceLogger
from spirl.utils.general_utils import AttrDict
from spirl.configs.default_data_configs.antmaze import data_spec
from spirl.components.evaluator import TopOfNSequenceEvaluator
from spirl.data.maze.src.maze_data_loader import MazeStateSequenceDataset
from spirl.maze_few_demo import get_demo_from_file, process_demo
from spirl.components.fsil import FewshotDataset
# Number of demonstrations used for few-shot imitation learning.
NUM_IL_DEMO = 10
# Length of each demo sub-sequence sampled for training.
subseq_len = 10

fewshot_dataset = FewshotDataset(
    'data/antmaze/Antmaze_UL.pkl',
    num_demo=NUM_IL_DEMO,
    subseq_len=subseq_len,
)

current_dir = os.path.dirname(os.path.realpath(__file__))

# Encoder settings for the contrastive model (state -> feature embedding).
contra_model_cf = AttrDict(
    state_dimension=data_spec.state_dim,
    hidden_size=128,
    feature_size=32,
)

# Top-level training configuration consumed by the spirl trainer.
configuration = {
    'model': GoalClSPiRLMdl,
    'logger': SkillSpaceLogger,
    'data_dir': '.',
    'epoch_cycles_train': 10,
    'evaluator': TopOfNSequenceEvaluator,
    'top_of_n_eval': 100,
    'top_comp_metric': 'mse',
    'batch_size': 128,
    'num_epochs': 220, # Total including pre-trained 200
    'fewshot_data': fewshot_dataset,
    'fewshot_batch_size': 128,
    'finetune_vae': False,
    'contra_config': contra_model_cf,
    'contra_ckpt': './experiments/antmaze/contrastive_UL/exact_fine_tuned_model.pt',
    'rst_data_path': './data/antmaze/ant_resets_UL.npy'
}
configuration = AttrDict(configuration)

# Skill-prior model hyper-parameters.
model_config = AttrDict(
    state_dim=data_spec.state_dim,
    action_dim=data_spec.n_actions,
    n_rollout_steps=10,
    kl_div_weight=1e-2,
    nz_enc=32,
    nz_mid=32,
    n_processing_layers=3,
    cond_decode=True,
    # NOTE(review): requires the EXP_DIR environment variable to be set,
    # otherwise importing this config raises KeyError.
    checkpt_path=f'{os.environ["EXP_DIR"]}/skill_prior_learning/maze/hierarchical_cl_state_gc_UL'
    # checkpt_path=f'{os.environ["EXP_DIR"]}/few_shot_imitation_learning/maze/hierarchical_cl_state_gc_UL_contrastive'
)

# Dataset
data_config = AttrDict()
data_config.dataset_spec = data_spec
data_config['dataset_spec']['dataset_class'] = MazeStateSequenceDataset
data_config['dataset_spec']['env_name'] = 'antmaze-large-diverse-v0'
data_config['dataset_spec']['dataset_path'] = './data/antmaze/Antmaze_filtered_UL.hdf5'
# Each sampled sequence needs one extra state beyond the rollout steps.
data_config.dataset_spec.subseq_len = model_config.n_rollout_steps + 1
# import os
# import numpy as np
#
# from spirl.models.closed_loop_spirl_mdl import GoalClSPiRLMdl
# from spirl.models.skill_prior_mdl import SkillSpaceLogger
# from spirl.utils.general_utils import AttrDict
# from spirl.configs.default_data_configs.maze import data_spec
# from spirl.components.evaluator import TopOfNSequenceEvaluator
# from spirl.data.maze.src.maze_data_loader import MazeStateSequenceDataset
# from spirl.maze_few_demo import get_demo_from_file, process_demo
#
# NUM_IL_DEMO = 20
# il_demo_states, il_demo_actions = get_demo_from_file('data/maze/demos.pkl', NUM_IL_DEMO)
# processed_demo_states, processed_demo_actions = process_demo(il_demo_states, il_demo_actions, 10)
#
#
# def sample_il_demo(batch):
# idxes = np.random.choice(len(processed_demo_states), size=batch)
# return processed_demo_states[idxes], processed_demo_actions[idxes]
#
#
# current_dir = os.path.dirname(os.path.realpath(__file__))
#
# configuration = {
# 'model': GoalClSPiRLMdl,
# 'logger': SkillSpaceLogger,
# 'data_dir': '.',
# 'epoch_cycles_train': 10,
# 'evaluator': TopOfNSequenceEvaluator,
# 'top_of_n_eval': 100,
# 'top_comp_metric': 'mse',
# 'batch_size': 1024,
# 'num_epochs': 220, # Total including pre-trained 200
# 'il_demo_sampler': sample_il_demo,
# 'il_demo_batch_size': 128
# }
# configuration = AttrDict(configuration)
#
# model_config = AttrDict(
# state_dim=data_spec.state_dim,
# action_dim=data_spec.n_actions,
# n_rollout_steps=10,
# kl_div_weight=1e-2,
# nz_enc=32,
# nz_mid=32,
# n_processing_layers=3,
# cond_decode=True,
# checkpt_path=f'{os.environ["EXP_DIR"]}/skill_prior_learning/maze/hierarchical_cl_state_gc_4M_B1024'
# )
#
# # Dataset
# data_config = AttrDict()
# data_config.dataset_spec = data_spec
# data_config['dataset_spec']['dataset_class'] = MazeStateSequenceDataset
# data_config['dataset_spec']['env_name'] = 'maze2d-large-v1'
# data_config['dataset_spec']['dataset_path'] = './maze2d-large-blr-v1-noisy-4M.hdf5'
# data_config.dataset_spec.subseq_len = model_config.n_rollout_steps + 1
|
import os
import glob
# from PIL import Image
import cv2
# def exist_file():
# if (filename.lower().endswith('.png') or filename.lower().endswith('.jpg') or filename.lower().endswith('.gif') or filename.lower().endswith('.bmp') or filename.lower().endswith('.pcx')):
# return True
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--raw-dir",
        help="Directory path to raw images.",
        default="./data/raw",
        type=str,
    )
    parser.add_argument(
        "--save-dir",
        help="Directory path to save resized images.",
        default="./data/images",
        type=str,
    )
    args = parser.parse_args()
    raw_dir = args.raw_dir
    save_dir = args.save_dir

    fnames = glob.glob(os.path.join(raw_dir, "*.jpg"))
    os.makedirs(save_dir, exist_ok=True)
    for fname in fnames:
        print(".", end="", flush=True)  # simple progress indicator
        img = cv2.imread(fname)
        # cv2.imread returns None (no exception) for unreadable files;
        # skip them instead of crashing inside cvtColor.
        if img is None:
            print("\nSkipping unreadable file: {}".format(fname))
            continue
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # os.path.basename is portable; the old fname.split("\\")[1] only
        # worked for Windows-style paths and raised IndexError elsewhere.
        gray_fname = os.path.join(save_dir, os.path.basename(fname))
        cv2.imwrite(gray_fname, img_gray)
    print(
        "\nMake GRAY scale {} files.\nSaved to directory: `{}`".format(
            len(fnames), save_dir
        )
    )
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype code-based operator validation classes** (i.e.,
:mod:`beartype`-specific classes enabling callers to define PEP-compliant
validators from arbitrary caller-defined objects tested via explicitly
supported operators efficiently generating stack-free code).
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ TODO }....................
# All "FIXME:" comments for this submodule reside in this package's "__init__"
# submodule to improve maintainability and readability here.
# ....................{ IMPORTS }....................
from beartype.roar import BeartypeValeSubscriptionException
from beartype.vale._valeisabc import _IsABC
from beartype._vale._valesub import _SubscriptedIs
from beartype._util.cache.utilcachecall import callable_cached
from beartype._util.func.utilfuncscope import (
CallableScope,
add_func_scope_attr,
)
from typing import Any
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ CLASSES ~ subscriptable }....................
#FIXME: Generalize to support arbitrary binary operators by:
#* Define a new "_IsOperatorBinaryABC(_IsABC, metaclass=ABCMeta)" superclass.
#* In that superclass:
# * Define a stock __class_getitem__() method whose implementation is
# sufficiently generic so as to be applicable to all subclasses. To do so,
# this method should access class variables defined by those subclasses.
# * Note that there is absolutely no reason or point to define abstract class
# methods forcing subclasses to define various metadata, for the unfortunate
# reason that abstract class methods do *NOT* actually enforce subclasses
# that aren't instantiable anyway to implement those methods. *sigh*
#* Refactor "IsEqual" to:
# * Subclass that superclass.
# * Define the following class variables, which the superclass
# __class_getitem__() method will internally access to implement itself:
# from operator import __eq__
#
# class IsEqual(_IsOperatorBinaryABC):
# _operator = __eq__
# _operator_code = '=='
#
#Ridiculously sweet, eh? We know.
class IsEqual(_IsABC):
'''
**Beartype object equality validator factory** (i.e., class that, when
subscripted (indexed) by any object, creates a new :class:`_SubscriptedIs`
object suitable for subscripting (indexing) :attr:`typing.Annotated` type
hints, validating that :mod:`beartype`-decorated callable parameters and
returns annotated by those hints are equal to that object).
This class efficiently validates that callable parameters and returns are
equal to the arbitrary object subscripting (indexing) this class. Any
:mod:`beartype`-decorated callable parameter or return annotated by a
:attr:`typing.Annotated` type hint subscripted (indexed) by this class
subscripted (indexed) by any object (e.g., ``typing.Annotated[{cls},
beartype.vale.IsEqual[{obj}]]`` for any class ``{cls}`` and object
    ``{obj}``) validates that the parameter or return value equals that object
under the standard ``==`` equality comparison.
This class is a generalization of the `PEP 586`_-compliant
:attr:`typing.Literal` type hint, because this class does everything
:attr:`typing.Literal` does and substantially more. Superficially,
:attr:`typing.Literal` also validates that callable parameters and returns
are equal to (i.e., ``==``) the literal object subscripting (indexing) that
:attr:`typing.Literal` type hint. The similarity ends there, however.
:attr:`typing.Literal` is only subscriptable by literal :class:`bool`,
:class:`bytes`, :class:`int`, :class:`str`, :class:`Enum`, and
``type(None)`` objects; this class is subscriptable by *any* object.
**This class incurs no time performance penalties at call time.** Whereas
the general-purpose :class:`beartype.vale.Is` class necessarily calls the
caller-defined callable subscripting that class at call time and thus
incurs a minor time performance penalty, this class efficiently reduces to
one-line tests in :mod:`beartype`-generated wrapper functions *without*
calling any callables and thus incurs *no* time performance penalties.
Caveats
----------
**This class is intentionally subscriptable by only a single object.** Why?
Disambiguity. When subscripted by variadic positional (i.e., one or more)
objects, this class internally treats those objects as items of a tuple to
validate equality against rather than as independent objects to iteratively
validate equality against. Since this is non-intuitive, callers should
avoid subscripting this class by multiple objects. Although non-intuitive,
this is also unavoidable. The ``__class_getitem__`` dunder method obeys
the same semantics as the ``__getitem__`` dunder method, which is unable to
differentiate between being subscripted two or more objects and being
subscripted by a tuple of two or more objects. Since being able to validate
equality against tuples of two or more objects is essential and since this
class being subscripted by two or more objects would trivially reduce to
shorthand for the existing ``|`` set operator already supported by this
class, this class preserves support for tuples of two or more objects at a
cost of non-intuitive results when subscripted by multiple objects.
Don't blame us. We didn't vote for `PEP 560`_.
Examples
----------
.. code-block:: python
# Import the requisite machinery.
>>> from beartype import beartype
>>> from beartype.vale import IsEqual
>>> from typing import Annotated
# Lists of the first ten items of well-known simple whole number series.
>>> WHOLE_NUMBERS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> WHOLE_NUMBERS_EVEN = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> WHOLE_NUMBERS_ODD = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
# Type hint matching only lists of integers equal to one of these lists.
>>> SimpleWholeNumberSeries = Annotated[
... list[int],
... IsEqual[WHOLE_NUMBERS] |
... IsEqual[WHOLE_NUMBERS_EVEN] |
... IsEqual[WHOLE_NUMBERS_ODD]
... ]
# Annotate callables by those type hints.
>>> @beartype
... def guess_next(series: SimpleWholeNumberSeries) -> int:
... """
... Guess the next whole number in the passed whole number series.
... """
... if series == WHOLE_NUMBERS: return WHOLE_NUMBERS[-1] + 1
... else: return series[-1] + 2
# Call those callables with parameters equal to one of those objects.
>>> guess_next(list(range(10)))
10
>>> guess_next([number*2 for number in range(10)])
20
# Call those callables with parameters unequal to one of those objects.
>>> guess_next([1, 2, 3, 6, 7, 14, 21, 42,])
beartype.roar._roarexc.BeartypeCallHintPepParamException: @beartyped
guess_next() parameter series=[1, 2, 3, 6, 7, 14, 21, 42] violates type
hint typing.Annotated[list[int], IsEqual[[0, 1, 2, 3, 4, 5, 6, 7, 8,
9]] | IsEqual[[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]] | IsEqual[[1, 3, 5,
7, 9, 11, 13, 15, 17, 19]]], as value [1, 2, 3, 6, 7, 14, 21, 42]
violates data constraint IsEqual[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]] |
IsEqual[[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]] | IsEqual[[1, 3, 5, 7, 9,
11, 13, 15, 17, 19]].
See Also
----------
:class:`beartype.vale.Is`
Further commentary.
.. _PEP 560:
https://www.python.org/dev/peps/pep-0560
'''
# ..................{ DUNDERS }..................
@callable_cached
def __class_getitem__(cls, obj: Any) -> _SubscriptedIs:
    '''
    `PEP 560`_-compliant dunder method creating and returning a new
    :class:`_SubscriptedIs` object validating equality against the passed
    arbitrary object suitable for subscripting `PEP 593`_-compliant
    :attr:`typing.Annotated` type hints.

    This method is memoized for efficiency.

    Parameters
    ----------
    obj : Any
        Arbitrary object to validate parameters and returns against.

    Returns
    ----------
    _SubscriptedIs
        New object encapsulating this validation.

    Raises
    ----------
    BeartypeValeSubscriptionException
        If this class was subscripted by *no* arguments.

    See Also
    ----------
    :class:`IsEqual`
        Usage instructions.

    .. _PEP 560:
       https://www.python.org/dev/peps/pep-0560
    .. _PEP 593:
       https://www.python.org/dev/peps/pep-0593
    '''

    # If...
    #
    # Note: Python packs multi-argument subscriptions into a tuple before
    # calling this dunder, so an empty-tuple subscription is the only way
    # "no arguments" can reach us here.
    if (
        # This class was subscripted by either no arguments *OR* two or
        # more arguments *AND*...
        isinstance(obj, tuple) and
        # This class was subscripted by no arguments...
        not obj
    # Then raise an exception.
    ):
        raise BeartypeValeSubscriptionException(
            f'{repr(cls)} subscripted by empty tuple.')
    # Else, this class was subscripted by one or more arguments. In any
    # case, accept this object as is. See the class docstring for details.
    # print(f'IsEqual[{repr(obj)}]')

    # Callable inefficiently validating against this object.
    # (Fallback path; the generated code snippet below is the fast path.)
    is_valid = lambda pith: pith == obj

    # Dictionary mapping from the name to value of each local attribute
    # referenced in the "is_valid_code" snippet defined below.
    is_valid_code_locals: CallableScope = {}

    # Name of a new parameter added to the signature of wrapper functions
    # whose value is this object, enabling this object to be tested in
    # those functions *WITHOUT* additional stack frames.
    obj_name = add_func_scope_attr(
        attr=obj, attr_scope=is_valid_code_locals)

    # Code snippet efficiently validating against this object.
    # ("{obj}" is a placeholder substituted by the code generator.)
    is_valid_code=f'{{obj}} == {obj_name}'

    # Create and return this subscription.
    return _SubscriptedIs(
        is_valid=is_valid,
        is_valid_code=is_valid_code,
        is_valid_code_locals=is_valid_code_locals,
        get_repr=lambda: f'{cls.__name__}[{repr(obj)}]',
    )
|
# Module author tag.
__author__ = 'kenjif'
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import yfinance as yf
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
import gensim
from gensim.corpora import Dictionary
from gensim.models.ldamodel import LdaModel
from gensim.models.coherencemodel import CoherenceModel
from gensim.models.wrappers import LdaMallet
from gensim.parsing.preprocessing import remove_stopwords, preprocess_string, preprocess_documents
from gensim.utils import simple_preprocess
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import spacy as sp
# !python3 -m spacy download en # run in terminal once
import nltk
from words import CustomWords
import argparse
# Local
from dataset import WsbData, StockData
# Ticker symbol -> company-name fragment used to build "mentions" regexes.
# Commented-out entries are candidates that were excluded from the analysis.
SYMBOLS = {'GME': 'Gamestop'
    , 'AMC': 'AMC'
    , 'NOK': 'Nokia'
    , 'BB': 'Blackberry'
    # , 'BBBY': 'Bed\sBath'
    # , 'EXPR': 'Express'
    # , 'KOSS': 'Koss'
    # , 'NAKD': 'Naked\sBrand'
}

# Shared stop-word provider used by the word-cloud generator.
CUSTOM = CustomWords()

# Command-line flags controlling which subset of posts/stocks is analysed
# and how sentiment is aggregated.
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="verbose")
# parser.add_argument("-p", "--plot", dest="plot", action="store_true", help="plot")
parser.add_argument("-t", "--title-only", dest="title", action="store_true", help="only check post title")
parser.add_argument("-m", "--mentioned-only", dest="mentioned", action="store_true", help="only check post that mentions the stock")
parser.add_argument("-g", "--gme-only", dest="gme", action="store_true", help="only check GME")
parser.add_argument("-w", "--weighted-sentiment", dest="weighted", action="store_true", help="Weight sentiment by post score")
parser.set_defaults(verbose=False, title=False, mentioned=False, gme=False)
args = parser.parse_args()

# -g restricts the whole analysis to Gamestop only.
if args.gme:
    SYMBOLS = {'GME': 'Gamestop'}
def time_lagged_corr(s1, s2, lag_range=None):
    """Find the extrema of the Pearson correlation between ``s1`` and shifted ``s2``.

    Shifts ``s2`` by every lag in ``[-lag_range, lag_range]`` and correlates
    the shifted series against ``s1``.

    Parameters
    ----------
    s1, s2 : pandas.Series
        Series to correlate; ``s2`` is the one that is shifted.
    lag_range : int, optional
        Maximum absolute lag to test.  Defaults to ``s2.size - 1`` (every
        possible shift).  An explicit ``0`` is honoured and tests only the
        unshifted case.

    Returns
    -------
    tuple
        ``(max_corr, max_lag, min_corr, min_lag)``.
    """
    # Bug fix: the original used a truthiness test (`if not lag_range:`),
    # which silently promoted an explicit lag_range of 0 to the full range.
    if lag_range is None:
        lag_range = s2.size - 1
    max_corr = float('-inf')
    min_corr = float('inf')
    max_lag = 0
    min_lag = 0
    # Shift s2 across the lag window and track the correlation extrema.
    for lag in range(-lag_range, lag_range + 1):
        corr = s1.corr(s2.shift(-lag))  # negative shift = delay
        if corr > max_corr:
            max_corr = corr
            max_lag = lag
        if corr < min_corr:
            min_corr = corr
            min_lag = lag
    return max_corr, max_lag, min_corr, min_lag
def generate_wordcloud(doc_df):
    """Render a word cloud of every document in ``doc_df['Doc']``.

    The resulting image is saved to the file ``wordcloud`` in the
    working directory.
    """
    # Concatenate all documents into one blob of text.
    corpus = ' '.join(doc for doc in doc_df['Doc'])
    # Merge both custom stop-word lists into a single lookup set.
    ignored = set(CUSTOM.get_git_stopwords()) | set(CUSTOM.get_more_stopwords())
    cloud = WordCloud(stopwords=ignored, background_color="white").generate(corpus)
    plt.figure(figsize=[19.2, 10.8])
    plt.imshow(cloud, interpolation='bilinear')
    plt.axis("off")
    plt.savefig("wordcloud")
def compare_sentiment_return(doc_df, stock_df, daily_rets, symbols=SYMBOLS):
    """Correlate daily WSB post sentiment with average stock price/return.

    Aggregates per-post sentiment (plain and market-jargon lexicons) to daily
    means, averages price and return across the selected symbols, then prints
    Pearson and time-lagged correlations, DTW distances, and saves plots and
    a CSV of the combined frame.

    Parameters: doc_df (posts with 'Doc', 'Sentiment', 'Market Sentiment',
    'Score', datetime index), stock_df / daily_rets (one column per symbol,
    datetime index), symbols (ticker -> company-name dict).
    """
    # Combine symbols
    valid_symbols = [s + '(?:\s|$)' + '|' + symbols[s] + '(?:\s|$)' for s in symbols.keys()] # make sure the word end with space
    valid_docs = doc_df
    if args.mentioned:
        # Check if the stock symbol exist in the doc
        valid_docs = valid_docs.loc[doc_df['Doc'].str.contains('|'.join(valid_symbols), case=False)] # combine stocks
        # Drop daily "megathread" posts, which aggregate unrelated chatter.
        valid_docs = valid_docs.loc[~valid_docs['Doc'].str.contains('megathread', case=False)]
        print(valid_docs)
    # Get average sentiment
    avg_daily_scores = valid_docs[['Sentiment', 'Market Sentiment', 'Score']]
    if args.weighted:
        # NOTE(review): avg_daily_scores is a slice of doc_df; the column
        # assignments below may trigger SettingWithCopyWarning — confirm
        # whether mutation of the original frame is intended.
        weights = avg_daily_scores['Score']/avg_daily_scores['Score'].sum(axis=0) # Weighted by post score
        print(weights)
        avg_daily_scores['Sentiment'] = avg_daily_scores['Sentiment'].multiply(weights)
        avg_daily_scores['Market Sentiment'] = avg_daily_scores['Market Sentiment'].multiply(weights)
        avg_daily_scores = avg_daily_scores.groupby(pd.Grouper(freq='D')).mean()[['Sentiment', 'Market Sentiment']]
    else:
        avg_daily_scores = avg_daily_scores[['Sentiment', 'Market Sentiment']].groupby(pd.Grouper(freq='D')).mean()
    # Days with no posts get neutral sentiment.
    avg_daily_scores = avg_daily_scores.fillna(0.)
    # print(avg_daily_scores)
    # Get average stock price and return
    avg_stock_df = stock_df[[s for s in symbols.keys()]]
    avg_daily_rets = daily_rets[[s for s in symbols.keys()]]
    # NOTE(review): the zero-filled column inserted below is included in the
    # subsequent mean(axis=1), diluting the cross-symbol average — confirm
    # whether this is intended.
    avg_stock_df.insert(0, 'Stock Price', 0.)
    avg_daily_rets.insert(0, 'Daily Return', 0.)
    avg_stock_df['Stock Price'] = avg_stock_df.mean(axis=1)
    avg_daily_rets['Daily Return'] = avg_daily_rets.mean(axis=1)
    # avg_stock_df = stock_df[[s for s in symbols.keys()]].rename(columns={s: "Stock Price"})
    # avg_daily_rets = stock_df[[s for s in symbols.keys()]].rename(columns={s: "Daily Return"})
    # One frame: daily sentiment columns + averaged price + averaged return.
    result = pd.concat([avg_daily_scores,
                        avg_stock_df[['Stock Price']],
                        avg_daily_rets[['Daily Return']]], axis=1)
    # print(result)
    # Store data
    result.to_csv("{}result_{}.csv".format("weighted_" if args.weighted else "", [s for s in symbols.keys()]))
    # Stocks
    print("Stocks selected: {}".format([s for s in symbols.keys()]))
    print("Mean: {} => {}".format(result['Sentiment'].mean(), result['Market Sentiment'].mean()))
    print("Variance: {} => {}".format(result['Sentiment'].var(), result['Market Sentiment'].var()))
    print("Standard deviation: {} => {}".format(result["Sentiment"].std(), result['Market Sentiment'].std()))
    # Check counts
    for s in symbols.keys():
        print("Stock mentioned count: {} => {}".format(s, doc_df[doc_df['Doc'].str.contains('|'.join([s + '(?:\s|$)', symbols[s] + '(?:\s|$)']), case=False)].size))
    # Jargons
    corr = avg_daily_scores['Sentiment'].corr(avg_daily_scores['Market Sentiment'])
    print("Pearson correlation of sentiment and market sentiment is {}".format(corr))
    # Stock price with jargons
    corr = avg_stock_df['Stock Price'].corr(avg_daily_scores['Market Sentiment'])
    print("Pearson correlation of price and sentiment is {} with market sentiment".format(corr))
    max_corr, max_lag, min_corr, min_lag = time_lagged_corr(avg_stock_df['Stock Price'], avg_daily_scores['Market Sentiment'], 10)
    print("Maximum time lagged correlation of price and sentiment is {} with lag {} with market sentiment".format(max_corr, max_lag))
    print("Minimum time lagged correlation of price and sentiment is {} with lag {} with market sentiment".format(min_corr, min_lag))
    # Daily returns with jargons
    corr = avg_daily_rets['Daily Return'].corr(avg_daily_scores['Market Sentiment'])
    print("Pearson correlation of return and sentiment is {} with market sentiment".format(corr))
    max_corr, max_lag, min_corr, min_lag = time_lagged_corr(avg_daily_rets['Daily Return'], avg_daily_scores['Market Sentiment'], 10)
    print("Maximum time lagged correlation of return and sentiment is {} with lag {} with market sentiment".format(max_corr, max_lag))
    print("Minimum time lagged correlation of return and sentiment is {} with lag {} with market sentiment".format(min_corr, min_lag))
    # Stock price without jargons
    corr = avg_stock_df['Stock Price'].corr(avg_daily_scores['Sentiment'])
    print("Pearson correlation of price and sentiment is {} without market sentiment".format(corr))
    max_corr, max_lag, min_corr, min_lag = time_lagged_corr(avg_stock_df['Stock Price'], avg_daily_scores['Sentiment'], 10)
    print("Maximum time lagged correlation of price and sentiment is {} with lag {} without market sentiment".format(max_corr, max_lag))
    print("Minimum time lagged correlation of price and sentiment is {} with lag {} without market sentiment".format(min_corr, min_lag))
    # Daily returns without jargons
    corr = avg_daily_rets['Daily Return'].corr(avg_daily_scores['Sentiment'])
    print("Pearson correlation of return and sentiment is {} without market sentiment".format(corr))
    max_corr, max_lag, min_corr, min_lag = time_lagged_corr(avg_daily_rets['Daily Return'], avg_daily_scores['Sentiment'], 10)
    print("Maximum time lagged correlation of return and sentiment is {} with lag {} without market sentiment".format(max_corr, max_lag))
    print("Minimum time lagged correlation of return and sentiment is {} with lag {} without market sentiment".format(min_corr, min_lag))
    # Plot
    ax = result[['Sentiment', 'Stock Price', 'Daily Return', 'Market Sentiment']].plot(kind='line', title='Average Sentiment vs. Stock Price/Daily Return').legend(loc='upper left')
    fig = ax.get_figure()
    fig.savefig("{}avg_sentiment_vs_stock_{}".format("weighted_" if args.weighted else "", valid_symbols), bbox_inches='tight')
    ax = result[['Stock Price', 'Market Sentiment']].plot(kind='line', title='Average Sentiment vs. Stock Price/Daily Return', color=['orange', 'red']).legend(loc='upper left')
    fig = ax.get_figure()
    fig.savefig("{}avg_sentiment_vs_stock_partial_{}".format("weighted_" if args.weighted else "", valid_symbols), bbox_inches='tight')
    # Dynamic-time-warping distance: shape similarity regardless of lag.
    dtw_distance_market, dtw_path_dtw = fastdtw(result['Stock Price'], result['Market Sentiment'], dist=euclidean)
    dtw_distance, dtw_path = fastdtw(result['Stock Price'], result['Sentiment'], dist=euclidean)
    print("Dynamic time warping price: {} => {}".format(dtw_distance, dtw_distance_market))
    dtw_distance_market, dtw_path_dtw = fastdtw(result['Daily Return'], result['Market Sentiment'], dist=euclidean)
    dtw_distance, dtw_path = fastdtw(result['Daily Return'], result['Sentiment'], dist=euclidean)
    print("Dynamic time warping daily return: {} => {}".format(dtw_distance, dtw_distance_market))
def main():
    """Load WSB posts and stock data, score per-post sentiment, run analysis."""
    # Default
    wsb_data = WsbData()
    stock_data = StockData(stocks=SYMBOLS)
    # Title and content
    doc_df = wsb_data.get_documents()
    # Title only with alternative dataset and time
    if args.title:
        wsb_data = WsbData(start='2020-12-01', end='2021-02-15', data_path="r_wallstreetbets_posts.csv")
        stock_data = StockData(start='2020-12-01', end='2021-02-15', stocks=SYMBOLS)
        doc_df = wsb_data.get_titles()
    # Normalise case so regex matching downstream can be case-insensitive-ish.
    doc_df['Doc'] = doc_df['Doc'].str.lower()
    # Sentiment score
    doc_df.insert(0, 'Sentiment', 0.) # Insert column
    doc_df.insert(0, 'Market Sentiment', 0.) # Insert column
    # With and without jargon: the second analyser uses a custom lexicon of
    # market slang loaded from market_lexicon.txt.
    sentiment_analyser = SentimentIntensityAnalyzer()
    market_sentiment_analyser = SentimentIntensityAnalyzer(lexicon_file="market_lexicon.txt")
    # Score every post with both lexicons (VADER 'compound' in [-1, 1]).
    for index, row in doc_df.iterrows():
        doc_df.at[index, 'Sentiment'] = sentiment_analyser.polarity_scores(row['Doc'])['compound']
        doc_df.at[index, 'Market Sentiment'] = market_sentiment_analyser.polarity_scores(row['Doc'])['compound']
    # Returns
    daily_rets = stock_data.get_daily_returns()
    three_day_rets = stock_data.get_three_day_returns()
    # Adjusted closing prices
    stock_df = stock_data.get_df()
    # Normalized log prices
    normed_df = stock_data.get_normalized()
    # Check dataframe
    if args.verbose:
        print(doc_df)
        print(stock_df)
        print(daily_rets)
    # Analysis — note the *normalized* prices are passed as the price frame.
    compare_sentiment_return(doc_df, normed_df, daily_rets, symbols=SYMBOLS)
    # generate_wordcloud(doc_df[['Doc']])
    return

if __name__ == '__main__':
    main()
|
from django.urls import path
from .views import (SignUpView, SignInView)
# URL namespace for reversing, e.g. 'authentication:signup'.
app_name = 'authentication'

# Sign-up and sign-in routes backed by class-based views.
urlpatterns = [
    path('signup/', SignUpView.as_view(), name='signup'),
    path('signin/', SignInView.as_view(), name='signin'),
]
|
from scipy.misc import imread, imresize, imsave
from scipy.optimize import fmin_l_bfgs_b
from sklearn.preprocessing import normalize
import numpy as np
import time
import os
import argparse
import h5py
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, ZeroPadding2D, AveragePooling2D
from keras import backend as K
"""
Neural Style Transfer with Keras 1.0.2
Uses the VGG-16 model as described in the Keras example below :
https://github.com/fchollet/keras/blob/master/examples/neural_style_transfer.py
Note:
Before running this script, download the weights for the VGG16 model at:
https://drive.google.com/file/d/0Bz7KyqmuGsilT0J5dmRCM0ROVHc/view?usp=sharing
(source: https://gist.github.com/baraldilorenzo/07d7802847aaad0a35d3)
and make sure the variable `weights_path` in this script matches the location of the file.
-----------------------------------------------------------------------------------------------------------------------
Modifications to original implementation :
- Uses 'conv5_2' output to measure content loss.
Original paper utilizes 'conv4_2' output
- Initial image used for generation is the base image (instead of a random noise image)
This method tends to create better output images, however parameters have to be well tuned
- Uses AveragePooling2D in place of MaxPooling2D layers
The original paper uses AveragePooling for better results
- Style weight scaling
- Rescaling of image to original dimensions, using lossy upscaling present in scipy.imresize()
- Maintain aspect ratio of intermediate and final stage images, using lossy upscaling
Note : Aspect Ratio is maintained only if image is not rescaled.
If image is rescaled to original dimensions then aspect ratio is maintained as well.
"""
# Command-line interface: required positional paths plus tunable weights for
# the three loss terms and output-image handling.
parser = argparse.ArgumentParser(description='Neural style transfer with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
                    help='Path to the image to transform.')
parser.add_argument('style_reference_image_path', metavar='ref', type=str,
                    help='Path to the style reference image.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
                    help='Prefix for the saved results.')
parser.add_argument("--image_size", dest="img_size", default=512, type=int, help='Output Image size')
parser.add_argument("--content_weight", dest="content_weight", default=0.025, type=float, help="Weight of content") # 0.025
parser.add_argument("--style_weight", dest="style_weight", default=1, type=float, help="Weight of content") # 1.0
parser.add_argument("--style_scale", dest="style_scale", default=1.0, type=float, help="Scale the weightage of the style") # 1, 0.5, 2
parser.add_argument("--total_variation_weight", dest="tv_weight", default=1e-3, type=float, help="Total Variation in the Weights") # 1.0
parser.add_argument("--num_iter", dest="num_iter", default=10, type=int, help="Number of iterations")
# Boolean-ish flags are passed as strings and parsed by strToBool below.
parser.add_argument("--rescale_image", dest="rescale_image", default="True", type=str, help="Rescale image after execution to original dimentions")
parser.add_argument("--rescale_method", dest="rescale_method", default="bilinear", type=str, help="Rescale image algorithm")
parser.add_argument("--maintain_aspect_ratio", dest="maintain_aspect_ratio", default="True", type=str, help="Maintain aspect ratio of image")
parser.add_argument("--content_layer", dest="content_layer", default="conv5_2", type=str, help="Optional 'conv4_2'")
parser.add_argument("--init_image", dest="init_image", default="content", type=str, help="Initial image used to generate the final image. Options are 'content' or 'noise")
args = parser.parse_args()

# Resolved paths and the location of the pre-trained VGG16 weight file.
base_image_path = args.base_image_path
style_reference_image_path = args.style_reference_image_path
result_prefix = args.result_prefix
weights_path = r"vgg16_weights.h5"
def strToBool(v):
    """Interpret a command-line string as a boolean flag."""
    truthy = ("true", "yes", "t", "1")
    return v.lower() in truthy
# Parse string flags into real booleans.
rescale_image = strToBool(args.rescale_image)
maintain_aspect_ratio = strToBool(args.maintain_aspect_ratio)

# these are the weights of the different loss components
total_variation_weight = args.tv_weight
style_weight = args.style_weight * args.style_scale
content_weight = args.content_weight

# dimensions of the generated picture.
img_width = img_height = args.img_size
assert img_height == img_width, 'Due to the use of the Gram matrix, width and height must match.'

# Original image dimensions and aspect ratio, filled in by
# preprocess_image(..., load_dims=True) for the final rescale.
img_WIDTH = img_HEIGHT = 0
aspect_ratio = 0
# util function to open, resize and format pictures into appropriate tensors
def preprocess_image(image_path, load_dims=False):
    """Load an image and convert it to a (1, 3, img_width, img_height) float tensor.

    When ``load_dims`` is True, also records the original width/height and
    aspect ratio in the module globals for the final rescale step.

    NOTE(review): relies on ``scipy.misc.imread``/``imresize``, which were
    removed in SciPy >= 1.2 — confirm the pinned SciPy version.
    """
    global img_WIDTH, img_HEIGHT, aspect_ratio
    img = imread(image_path, mode="RGB") # Prevents crashes due to PNG images (ARGB)
    if load_dims:
        img_WIDTH = img.shape[0]
        img_HEIGHT = img.shape[1]
        aspect_ratio = img_HEIGHT / img_WIDTH
    img = imresize(img, (img_width, img_height))
    # HWC -> CHW, then add the leading batch axis.
    img = img.transpose((2, 0, 1)).astype('float64')
    img = np.expand_dims(img, axis=0)
    return img
# util function to convert a tensor into a valid image
def deprocess_image(x):
    """Convert a (channels, height, width) float array back to a displayable image.

    Moves the channel axis last and clamps values into the valid 8-bit range.
    """
    # (3, H, W) -> (H, W, 3)
    image = x.transpose((1, 2, 0))
    # Clamp to displayable pixel values and cast to bytes.
    image = np.clip(image, 0, 255).astype('uint8')
    return image
# get tensor representations of our images
base_image = K.variable(preprocess_image(base_image_path, True))
style_reference_image = K.variable(preprocess_image(style_reference_image_path))

# this will contain our generated image
combination_image = K.placeholder((1, 3, img_width, img_height))

# combine the 3 images into a single Keras tensor: batch index 0 = content,
# 1 = style reference, 2 = generated image.
input_tensor = K.concatenate([base_image,
                              style_reference_image,
                              combination_image], axis=0)

# build the VGG16 network with our 3 images as input
first_layer = ZeroPadding2D((1, 1))
first_layer.set_input(input_tensor, shape=(3, 3, img_width, img_height))

# VGG16 convolutional stack; only layers used by the losses are named.
# AveragePooling2D replaces MaxPooling2D (see module docstring).
model = Sequential()
model.add(first_layer)
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(AveragePooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(AveragePooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(AveragePooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(AveragePooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(AveragePooling2D((2, 2), strides=(2, 2)))

# load the weights of the VGG16 networks
# (trained on ImageNet, won the ILSVRC competition in 2014)
# note: when there is a complete match between your model definition
# and your weight savefile, you can simply call model.load_weights(filename)
assert os.path.exists(weights_path), 'Model weights not found (see "weights_path" variable in script).'
f = h5py.File(weights_path)
for k in range(f.attrs['nb_layers']):
    if k >= len(model.layers):
        # we don't look at the last (fully-connected) layers in the savefile
        break
    g = f['layer_{}'.format(k)]
    weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
    model.layers[k].set_weights(weights)
f.close()
print('Model loaded.')

# get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
# compute the neural style loss
# first we need to define 4 util functions
# the gram matrix of an image tensor (feature-wise outer product)
def gram_matrix(x):
    """Return the Gram matrix (feature-wise outer product) of a 3-D tensor."""
    assert K.ndim(x) == 3
    # Flatten each feature map into a row, then take the outer product.
    flat = K.batch_flatten(x)
    return K.dot(flat, K.transpose(flat))
# the "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image
def style_loss(style, combination):
    """Squared Frobenius distance between Gram matrices of two feature maps.

    Keeps the generated image's feature correlations close to those of the
    style reference.
    """
    assert K.ndim(style) == 3
    assert K.ndim(combination) == 3
    gram_style = gram_matrix(style)
    gram_comb = gram_matrix(combination)
    channels = 3
    size = img_width * img_height
    normaliser = 4. * (channels ** 2) * (size ** 2)
    return K.sum(K.square(gram_style - gram_comb)) / normaliser
# an auxiliary loss function
# designed to maintain the "content" of the
# base image in the generated image
def content_loss(base, combination):
    """Sum of squared differences between base and generated feature maps."""
    diff = combination - base
    return K.sum(K.square(diff))
# the 3rd loss function, total variation loss,
# designed to keep the generated image locally coherent
def total_variation_loss(x):
    """Total-variation regulariser encouraging locally coherent (smooth) images."""
    assert K.ndim(x) == 4
    # Squared differences between each pixel and its neighbour one step
    # down (row_diff) and one step right (col_diff).
    base = x[:, :, :img_width-1, :img_height-1]
    row_diff = K.square(base - x[:, :, 1:, :img_height-1])
    col_diff = K.square(base - x[:, :, :img_width-1, 1:])
    return K.sum(K.pow(row_diff + col_diff, 1.25))
# combine these loss functions into a single scalar
loss = K.variable(0.)

# Content term: batch index 0 is the base image, 2 the generated image.
layer_features = outputs_dict[args.content_layer] # 'conv5_2' or 'conv4_2'
base_image_features = layer_features[0, :, :, :]
combination_features = layer_features[2, :, :, :]
loss += content_weight * content_loss(base_image_features,
                                      combination_features)

# Style term: averaged over one layer per convolutional block.
feature_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
for layer_name in feature_layers:
    layer_features = outputs_dict[layer_name]
    style_reference_features = layer_features[1, :, :, :]
    combination_features = layer_features[2, :, :, :]
    sl = style_loss(style_reference_features, combination_features)
    loss += (style_weight / len(feature_layers)) * sl
loss += total_variation_weight * total_variation_loss(combination_image)

# get the gradients of the generated image wrt the loss
grads = K.gradients(loss, combination_image)

# Backend may return a single tensor or a list of tensors.
outputs = [loss]
if type(grads) in {list, tuple}:
    outputs += grads
else:
    outputs.append(grads)

# Single compiled function computing loss and gradients in one pass.
f_outputs = K.function([combination_image], outputs)
def eval_loss_and_grads(x):
    """Run one forward/backward pass; return (loss, flattened float64 gradients)."""
    outs = f_outputs([x.reshape((1, 3, img_width, img_height))])
    loss_value = outs[0]
    grad_outs = outs[1:]
    # The backend may hand back gradients as one array or several.
    if len(grad_outs) == 1:
        grad_values = grad_outs[0].flatten().astype('float64')
    else:
        grad_values = np.array(grad_outs).flatten().astype('float64')
    return loss_value, grad_values
# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
class Evaluator(object):
    """Cache loss and gradients from a single combined forward/backward pass.

    scipy's L-BFGS wants separate loss and gradient callbacks, but computing
    them separately would double the work.  ``loss`` runs the combined pass
    and stashes the gradients; ``grads`` returns (and clears) the stash.
    Calls must therefore alternate: loss() then grads().
    """

    def __init__(self):
        self.loss_value = None
        # Fixed: the original initialised a misspelled, never-read
        # ``grads_values`` attribute; the rest of the class uses
        # ``grad_values``.
        self.grad_values = None

    def loss(self, x):
        """Compute and return the loss, caching gradients for grads()."""
        assert self.loss_value is None  # enforce loss() -> grads() ordering
        loss_value, grad_values = eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        """Return the gradients cached by the preceding loss() call."""
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        # Clear the cache so the next loss() call starts fresh.
        self.loss_value = None
        self.grad_values = None
        return grad_values
evaluator = Evaluator()

# run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the neural style loss
assert args.init_image in ["content", "noise"] , "init_image must be one of ['original', 'noise']"
if "content" in args.init_image:
    # Start from the base image (usually converges to nicer results).
    x = preprocess_image(base_image_path, True)
else:
    # Start from uniform random noise.
    x = np.random.uniform(0, 255, (1, 3, img_width, img_height))

num_iter = args.num_iter
for i in range(num_iter):
    print('Start of iteration', (i+1))
    start_time = time.time()

    # One bounded L-BFGS step; Evaluator supplies loss and gradients from a
    # single combined pass.
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                     fprime=evaluator.grads, maxfun=20)
    print('Current loss value:', min_val)
    # save current generated image
    img = deprocess_image(x.reshape((3, img_width, img_height)))

    # Optionally restore the aspect ratio / original dimensions recorded by
    # preprocess_image(..., load_dims=True).
    if (maintain_aspect_ratio) & (not rescale_image):
        img_ht = int(img_width * aspect_ratio)
        print("Rescaling Image to (%d, %d)" % (img_width, img_ht))
        img = imresize(img, (img_width, img_ht), interp=args.rescale_method)
    if rescale_image:
        print("Rescaling Image to (%d, %d)" % (img_WIDTH, img_HEIGHT))
        img = imresize(img, (img_WIDTH, img_HEIGHT), interp=args.rescale_method)

    fname = result_prefix + '_at_iteration_%d.png' % (i+1)
    imsave(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i+1, end_time - start_time))
from dataflows import Flow, update_resource, add_field, checkpoint
from midburn_dataflows.kubectl import kubectl, get_item_detailed_status
def kubectl_get_all_flow(resource_name='kubectl_get_all'):
    """Build a dataflows Flow that dumps all kubectl objects with derived status.

    The flow runs ``kubectl()``, renames its resource to ``resource_name``,
    declares the extra status fields, and enriches each row with the detailed
    status plus error/container counts.
    """

    def add_status(rows):
        # Row processor: enrich only the target resource, pass others through.
        if rows.res.name == resource_name:
            for row in rows:
                status = get_item_detailed_status(row)
                row.update(**status)
                # Counts default to 0 when the lists are missing or empty.
                row['num_errors'] = len(row['errors']) if row.get('errors') else 0
                row['num_containers'] = len(row['containers']) if row.get('containers') else 0
                yield row
        else:
            yield from rows

    return Flow(
        kubectl(),
        update_resource('res_1', name=resource_name, path='kubectl_get_all.csv'),
        add_field('true_status_last_transitions', 'object', resources=[resource_name]),
        add_field('errors', 'array', resources=[resource_name]),
        add_field('num_errors', 'integer', resources=[resource_name]),
        add_field('num_containers', 'integer', resources=[resource_name]),
        add_status
    )
def kubectl_get_volumes_flow(source_resource_name='kubectl_get_all',
                             resource_name='kubectl_get_volumes',
                             get_all_checkpoint_name=None):
    """Build a Flow that extracts one row per pod volume from the kubectl dump.

    Replaces the ``source_resource_name`` resource with a ``resource_name``
    resource of volumes, then filters out kube-system and secret/config/empty
    volumes, keeping only nfs / gcePersistentDisk-backed ones.
    """
    # Volume source types we know how to represent as columns.
    volume_object_fields = ['hostPath', 'secret', 'configMap', 'emptyDir', 'gcePersistentDisk',
                            'nfs']

    def get_volumes(rows):
        # Flatten: one output row per volume, tagged with its owning object.
        for row in rows:
            volumes = row.get('volumes')
            for volume in (volumes if volumes else []):
                yield {
                    'name': volume.pop('name'),
                    'source_name': row['name'],
                    'source_kind': row['kind'],
                    'source_namespace': row['namespace'],
                    **{field: volume.pop(field, None) for field in volume_object_fields},
                }
                # Any leftover key means an unrecognised volume source type.
                assert len(volume) == 0, volume

    def add_volumes(package):
        # Package processor: swap the source resource for the volumes resource.
        package.pkg.remove_resource(source_resource_name)
        package.pkg.add_resource({
            'name': resource_name,
            'path': f'{resource_name}.csv',
            'schema': {
                'fields': [
                    {'name': 'name', 'type': 'string'},
                    {'name': 'source_kind', 'type': 'string'},
                    {'name': 'source_name', 'type': 'string'},
                    {'name': 'source_namespace', 'type': 'string'},
                    *[{'name': field, 'type': 'object'} for field in volume_object_fields],
                ]
            }
        })
        yield package.pkg
        # NOTE(review): rows of resources other than the source are not
        # yielded here and appear to be dropped — confirm this is intended.
        for rows in package:
            if rows.res.name == source_resource_name:
                yield get_volumes(rows)

    def filter_volumes(rows):
        # Keep only externally backed volumes outside kube-system.
        if rows.res.name == resource_name:
            for row in rows:
                if row['source_namespace'] == 'kube-system': continue
                if any((row.get(f) or row.get(f) == {}) for f in ['secret', 'configMap', 'emptyDir']): continue
                assert row.get('nfs', None) or row.get('gcePersistentDisk', None), row
                yield row
        else:
            yield from rows

    return Flow(
        kubectl_get_all_flow(),
        checkpoint(get_all_checkpoint_name) if get_all_checkpoint_name else None,
        add_volumes,
        filter_volumes
    )
|
# MIT License.
# Copyright (c) 2021 by BioicDL. All rights reserved.
# Created by LiuXb on 2021/1/21
# -*- coding:utf-8 -*-
"""
@Modified:
@Description:
"""
import sys
import os
# Make sibling modules importable when this file is run as a script by
# adding this file's directory to the module search path.
_root_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_root_path)
|
from django.contrib import admin
from .models import Business,Post,Neighboorhood
# Register your models here.
# Expose the app's models in the Django admin with default ModelAdmin options.
admin.site.register(Business)
admin.site.register(Post)
# NOTE(review): "Neighboorhood" spelling matches the import above — the model
# name itself appears misspelled; confirm before renaming anywhere.
admin.site.register(Neighboorhood)
#-*- coding: utf-8 -*-
import sys
from optparse import OptionParser
from django.conf import settings, global_settings
# Configure a minimal standalone Django environment (sqlite, the app under
# test, request context processor) so the suite can run outside a project.
if not settings.configured:
    settings.configure(
        DATABASES = {
            "default": {
                "ENGINE": "django.db.backends.sqlite3",
            }
        },
        INSTALLED_APPS = [
            "infinite_pagination",
            "tests",
        ],
        TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS +
            ("django.core.context_processors.request",),
        ROOT_URLCONF ="tests.urls",
        DEBUG = False,
    )
from django.test.simple import DjangoTestSuiteRunner
def run_tests(*test_args, **kwargs):
    """Run the Django test suite for the given labels and exit with the failure count.

    Positional args are test labels (defaults to ["tests"]); recognised
    keyword args: verbosity, interactive, failfast.
    """
    if not test_args:
        test_args = ["tests"]
    test_runner = DjangoTestSuiteRunner(
        verbosity=kwargs.get("verbosity", 1),
        interactive=kwargs.get("interactive", False),
        failfast=kwargs.get("failfast")
    )
    failures = test_runner.run_tests(test_args)
    # Non-zero exit code when any test failed, for CI friendliness.
    sys.exit(failures)
# CLI entry point: forward --failfast/--verbosity plus any test labels.
if __name__ == "__main__":
    parser = OptionParser()
    parser.add_option("--failfast", action="store_true", default=False, dest="failfast")
    parser.add_option("--verbosity", action="store", default=1, type=int, dest="verbosity")
    (options, args) = parser.parse_args()
    run_tests(failfast=options.failfast, verbosity=options.verbosity, *args)
|
from bottle import route, run, get, post, request, static_file, abort, hook, response
from pyzem.dvid import dvidenv, dvidio
from pyzem.compute import skeletonizer, bodysplit
import flyem_data as fd
import json
import subprocess
import sys
import socket
import jsonschema
import os
import timer
import time
import threading
import datetime
import copy
import yaml
import argparse
from queue import *
# Service configuration: a single --config YAML file drives everything below.
parser = argparse.ArgumentParser(description='Process arguments for running neutu service')
parser.add_argument('--config', dest='config', type=str, help='Configuration file in YAML format')
args=parser.parse_args()
print(args.config)
# Developer fallback path when no config is supplied.
if not args.config:
    args.config = '/Users/zhaot/Work/neutube/neurolabi/python/service/test.yaml'

# NOTE(review): yaml.load without an explicit Loader is deprecated and unsafe
# on untrusted input — confirm the config source is trusted / pin safe_load.
with open(args.config, 'r') as fp:
    serverConfig = yaml.load(fp)

print(serverConfig)

# Worker objects: skeletonization and body-split backends.
skl = skeletonizer.Skeletonizer()
bs = bodysplit.BodySplit()
if 'command' in serverConfig:
    # Both workers shell out to the same neutu executable.
    command = serverConfig['command']
    skl.setExecutable(command)
    bs.set_neutu_path(command)

if 'task_server' in serverConfig:
    bs.load_server(serverConfig['task_server'])

# print(bs.get_env())
# bs.clear_split_task()
# exit(1)

# Generous socket timeout for long-running DVID requests.
socket.setdefaulttimeout(1000)

dvidEnvMap = {}
# Work queue consumed by the background process() thread.
eventQueue = Queue()
#eventLock = Lock()
dvidWriter = dvidio.DvidClient()
def processEvent(event):
    """Dispatch one data event to the appropriate DVID write/skeletonize action.

    INVALIDATE on a body deletes its skeleton and thumbnail; DELETE
    additionally removes the body annotation; UPDATE on a skeleton re-runs
    skeletonization for the body.
    """
    print("Processing event ...")
    print(event.dvidEnv)
    # Point the shared writer at the event's DVID environment.
    dvidWriter.set_dvidenv(event.dvidEnv)
    if event.getType() == fd.DataEvent.DATA_INVALIDATE:
        if event.getDataId().getType() == fd.DataId.DATA_BODY:
            print("Invalidating body", event.getDataId().getBodyId())
            dvidWriter.delete_skeleton(event.getDataId().getBodyId())
            dvidWriter.delete_thumbnail(event.getDataId().getBodyId())
    elif event.getType() == fd.DataEvent.DATA_DELETE:
        if event.getDataId().getType() == fd.DataId.DATA_BODY:
            print("Deleting body data", event.getDataId().getBodyId())
            dvidWriter.delete_skeleton(event.getDataId().getBodyId())
            dvidWriter.delete_thumbnail(event.getDataId().getBodyId())
            dvidWriter.delete_body_annotation(event.getDataId().getBodyId())
    elif event.getType() == fd.DataEvent.DATA_UPDATE:
        # Small grace period so upstream writes settle before reading.
        time.sleep(5)
        if event.getDataId().getType() == fd.DataId.DATA_SKELETON:
            print("Skeletionzing body", event.getDataId().getBodyId(), event.dvidEnv)
            skl.setDvidEnv(event.dvidEnv)
            print(skl.getDvidEnv())
            skl.skeletonize(event.getDataId().getBodyId())
def process():
    """Event-pump thread body: drain and log the queue, then consume events.

    First drains everything currently in ``eventQueue`` without blocking,
    logging each pending event's body id (newest first), then loops forever
    consuming events and delegating to ``processEvent``.  On any failure the
    outstanding split task/result state is cleared as best-effort recovery.

    NOTE(review): the drained events are only logged, never re-queued or
    processed — confirm this is intentional.
    """
    # Drain whatever is queued right now without blocking.
    eqcopy = []
    while True:
        try:
            elem = eventQueue.get(block=False)
        except Empty:
            break
        else:
            eqcopy.append(elem)
    # Fixed: the original iterated `reversed(eqcopy)` while unpacking
    # (i, e), which raises TypeError on the first event; enumerate()
    # supplies the index the loop body prints.
    for i, e in reversed(list(enumerate(eqcopy))):
        print(e.getDataId().getBodyId(), i)
    # Main consume loop: wait up to 5s per event; on timeout or any
    # processing error, clear pending split state and keep going.
    while True:
        try:
            event = eventQueue.get(timeout = 5)
            print(datetime.datetime.now(), "Processing event ...", event)
            processEvent(event)
        except Exception as e:
            print(e)
            try:
                print("Clearing split task ...")
                bs.clear_split_task()
                print("Clearing split result ...")
                bs.clear_split_result()
            except Exception as e:
                print (e)
    #threading.Timer(1, process).start()
def getSchema(service, method):
    """Extract the first JSON schema block for `service` from its RAML file.

    Reads ``<service>/interface.raml`` and returns the raw text of the JSON
    object that follows the first ``schema: |`` marker after the
    ``/<service>:`` section header.  The `method` argument is currently
    unused and kept only for interface compatibility.

    Raises:
        Exception: if the RAML file is empty.
    """
    # The redundant f.close() inside the with-block was removed; the context
    # manager already closes the file.
    with open(service + '/interface.raml') as f:
        content = f.readlines()
    if not content:
        raise Exception("Invalid schema")
    serviceHead = False
    methodStart = False
    schemaStart = False
    objectLevel = 0
    schema = ''
    for line in content:
        if schemaStart:
            # Accumulate raw lines until the braces balance out.
            schema += line
            if line.find('{') >= 0:
                objectLevel += 1
            if line.find('}') >= 0:
                objectLevel -= 1
            if objectLevel == 0:
                break
        line = line.strip(' \t\n\r')
        if methodStart:
            if line == 'schema: |':
                schemaStart = True
        if serviceHead:
            methodStart = True
        if line == '/' + service + ':':
            serviceHead = True
    return schema
@get('/home')
def home():
    """Landing page: a simple HTML greeting."""
    greeting = '<h1>Welcome to the skeletonization service</h1>'
    return greeting
@get('/split_task')
def list_split_task():
    """Render the current body-split tasks as an HTML ordered list."""
    header = '''
    <h1>Split tasks</h1>
    <p>
    <ol>
    '''
    items = []
    for task in bs.read_task_entries():
        items.append('<li>' + task + '</li>')
        prop = bs.read_task_property(task)
        # Debug: dump properties that carry an "age" field.
        if prop and "age" in prop:
            print(prop)
    return header + ''.join(items) + '</ol></p>'
# @get('/skeletonize')
# def skeletonize():
# response = '''
# <form action="/skeletonize" method="post">
# Body ID: <input name="bodyId" type="text"/>
# <input value="Submit" type="submit"/>
# '''
# response += "<select name=\"database\">"
# for name in sorted(dvidEnvMap):
# response += "<option value=\"" + name + "\">" + name + "</option>"
# response += "</select>"
#
# response += "</form>"
#
# return response
# @get('/update_body')
# def requestBodyUpdate():
# response = '''
# <form action="/update_body" method="post">
# Body ID: <input name="bodyId" type="text"/>
# <input value="Submit" type="submit"/>
# '''
# response += "<select name=\"database\">"
# for name in sorted(dvidEnvMap):
# response += "<option value=\"" + name + "\">" + name + "</option>"
# response += "</select>"
#
# response += "</form>"
#
# return response
@post('/update_body')
def updateBody():
    """Queue skeleton update/delete/invalidate events for a list of bodies.

    Expects a JSON payload (validated against update_body/interface.raml)
    containing a 'bodies' list, DVID server settings, and an optional
    'option' of 'update' (default), 'add', 'invalidate' or 'delete'.
    Matching events are pushed onto the global eventQueue and handled
    asynchronously by process(); nothing is returned on success.
    """
    print(request.content_type)
    bodyArray = []
    dvidEnv = None
    option = None
    if request.content_type == 'application/json':
        print(request.json)
        jsonObj = request.json
        try:
            schema = getSchema('update_body', 'post')
            jsonschema.validate(jsonObj, json.loads(schema))
        except jsonschema.exceptions.ValidationError as inst:
            print('Invalid json input')
            print(inst)
            return '<p>Update for ' + str(bodyArray) + ' failed.</p>'
        # Guard against a present-but-null 'bodies' entry.
        bodyArray = jsonObj.get('bodies') or []
        option = jsonObj.get('option')
        dvidEnv = dvidenv.DvidEnv()
        dvidEnv.load_server_config(jsonObj)
        print(dvidEnv)
    if not option:
        option = "update"
    for bodyId in bodyArray:
        if option == "delete":
            event = fd.DataEvent(fd.DataEvent.DATA_DELETE, fd.DataId(fd.DataId.DATA_BODY, bodyId), dvidEnv)
            eventQueue.put(event)
            print("+++Event added:", event)
        if option in ("update", "invalidate"):
            event = fd.DataEvent(fd.DataEvent.DATA_INVALIDATE, fd.DataId(fd.DataId.DATA_BODY, bodyId), dvidEnv)
            eventQueue.put(event)
            print("+++Event added:", event)
        if option in ("update", "add"):
            event = fd.DataEvent(fd.DataEvent.DATA_UPDATE, fd.DataId(fd.DataId.DATA_SKELETON, bodyId), dvidEnv)
            eventQueue.put(event)
            print("+++Event added:", event)
@post('/skeletonize')
def do_skeletonize():
    """Skeletonize a list of bodies, synchronously, and return a JSON map of
    body id -> skeleton URL.

    Accepts either a form post (bodyId list + database name resolved through
    dvidEnvMap) or a JSON post validated against skeletonize/interface.raml.
    For each body, the existing skeleton endpoint is probed with a GET;
    skeletonization only runs when that probe is not a 200.
    """
    print(request.content_type)
    bodyArray = [];
    #dvidServer = getDefaultDvidServer()
    #uuid = getDefaultUuid()
    if request.content_type == 'application/x-www-form-urlencoded':
        bodyIdStr = request.forms.get('bodyId')
        dvidName = request.forms.get('database')
        skl.setDvidEnv(dvidEnvMap[dvidName])
        print(skl.getDvidEnv())
        # bodyId field is a whitespace-separated list of integer ids.
        bodyArray = [int(bodyId) for bodyId in bodyIdStr.split()]
    elif request.content_type == 'application/json':
        print(request.json)
        jsonObj = request.json
        try:
            jsonschema.validate(jsonObj, json.loads(getSchema('skeletonize', 'post')))
        except jsonschema.exceptions.ValidationError as inst:
            print('Invalid json input')
            print(inst)
            return '<p>Skeletonization for ' + str(bodyArray) + ' failed.</p>'
        uuid = jsonObj['uuid']
        # NOTE(review): when 'dvid-server' is absent, dvidServer below is
        # unbound and the config line raises NameError — presumably the
        # schema makes the field mandatory; confirm against interface.raml.
        if 'dvid-server' in jsonObj:
            dvidServer = jsonObj['dvid-server']
        bodyArray = jsonObj['bodies']
        config = {'dvid-server': dvidServer, 'uuid': uuid}
        skl.loadDvidConfig(config)
    output = {}
    dvidUrl = DvidUrl(skl.getDvidEnv())
    for bodyId in bodyArray:
        print(dvidUrl.getServerUrl())
        # Probe whether a skeleton already exists for this body.
        conn = http.client.HTTPConnection(dvidUrl.getServerUrl())
        bodyLink = dvidUrl.getSkeletonEndPoint(bodyId)
        print('************', bodyLink)
        conn.request("GET", bodyLink)
        outputUrl = dvidUrl.getServerUrl() + bodyLink
        r1 = conn.getresponse()
        if not r1.status == 200:
            try:
                print("Skeletonizing " + str(bodyId))
                skl.skeletonize(bodyId)
                output[str(bodyId)] = outputUrl
            except Exception as inst:
                # Abort the whole request on the first failure.
                return '<p>' + str(inst) + '</p>'
        else:
            print("skeleton is ready for " + str(bodyId))
            output[str(bodyId)] = outputUrl
    print(output)
    return json.dumps(output, sort_keys = False)
# return '<p>Skeletonization for ' + str(bodyArray) + ' is completed.</p>'
@hook('after_request')
def enable_cors(fn=None):
    """Add permissive CORS headers to responses.

    Used both as an 'after_request' hook (where fn is None and the wrapper
    is invoked without calling fn) and as a decorator around route
    callbacks, where it short-circuits OPTIONS preflight requests and
    otherwise delegates to the wrapped handler.
    """
    def _enable_cors(*args, **kwargs):
        print('enable cors')
        response.headers['Access-Control-Allow-Origin'] = '*'
        response.headers['Access-Control-Expose-Headers'] = 'Content-Type'
        response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
        response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
        # Preflight requests get headers only, no handler invocation.
        if request.method != 'OPTIONS':
            return fn(*args, **kwargs)
    return _enable_cors
@route('/interface/interface.raml', method=['GET', 'OPTIONS'])
@enable_cors
def retrieveRaml():
    """Serve the RAML interface description with a permissive CORS header."""
    print('retrieve raml')
    raml_response = static_file('interface.raml', root='.', mimetype='application/raml+yaml')
    raml_response.headers['Access-Control-Allow-Origin'] = '*'
    return raml_response
def get_json_post():
    """Decode the request body as JSON; abort with HTTP 400 on bad input."""
    try:
        parsed = json.load(request.body)
    except ValueError:
        abort(400, 'Bad request: Could not decode request body (expected JSON).')
    else:
        return parsed
@post('/json')
def parseJson():
    """Echo the 'head' field of a JSON post, wrapped in a paragraph tag."""
    payload = get_json_post()
    return '<p>' + payload['head'] + '</p>'
# Server binding, overridable via the 'port' / 'host' keys of serverConfig.
port = 8080
if 'port' in serverConfig:
    port = int(serverConfig['port'])
host = 'localhost'
if 'host' in serverConfig:
    host = serverConfig['host']
#if len(sys.argv) > 1:
#    port = sys.argv[1]
#run(host=socket.gethostname(), port=port, debug=True)
run(host=host, port=port, debug=True)
# print getSchema('skeletonize', 'post')
# try:
# jsonschema.validate({"bodies": [1, 2, 3]}, json.loads(getSchema('skeletonize', 'post')))
# except jsonschema.exceptions.ValidationError as inst:
# print 'Invalid json input'
# print inst
|
import os
import numpy as np
import pickle
import random
def getFileNameList(filePath):
    """Return directory entries sorted by the part of the name before the
    first dot (names without a dot sort by all but their last character)."""
    entries = os.listdir(filePath)
    entries.sort(key=lambda name: name[:name.find('.')])
    return entries
def load_CT(ROOT):
    """Load every pickled (X, Y, U) chunk found under ROOT and stack them.

    Each pickle holds an indexable triple [X, Y, U].  X and U are reshaped
    into (-1, 32, 32, 1) image stacks; Y is returned as the concatenated
    label vector.
    """
    images, labels, extras = [], [], []
    for fname in getFileNameList(ROOT):
        with open(os.path.join(ROOT, fname), 'rb') as fh:
            chunk = pickle.load(fh)
        images.append(chunk[0])
        labels.append(chunk[1])
        extras.append(chunk[2])
    # Concatenate all chunks, then reshape image data to NHWC with 1 channel.
    Xtr = np.concatenate(images).reshape(-1, 32, 32, 1)
    Ytr = np.concatenate(labels)
    Utr = np.concatenate(extras).reshape(-1, 32, 32, 1)
    return Xtr, Ytr, Utr
def random_crop(batch, crop_shape, padding=None):
    """Randomly crop every image in `batch` to `crop_shape` (height, width).

    With `padding` set, each image is first zero-padded by that many pixels
    on all four sides and the crop offset is drawn from the padded extent.
    Channel dimensions beyond (H, W) are kept intact.
    """
    base_shape = np.shape(batch[0])
    if padding:
        base_shape = (base_shape[0] + 2 * padding, base_shape[1] + 2 * padding)
    pad_spec = ((padding, padding), (padding, padding), (0, 0))
    cropped = []
    for img in batch:
        if padding:
            img = np.lib.pad(img, pad_width=pad_spec,
                             mode='constant', constant_values=0)
        top = random.randint(0, base_shape[0] - crop_shape[0])
        left = random.randint(0, base_shape[1] - crop_shape[1])
        cropped.append(img[top:top + crop_shape[0],
                           left:left + crop_shape[1]])
    return np.array(cropped)
def random_flip_leftright(batch):
    """Flip each image horizontally with probability 0.5 (in place)."""
    for idx, img in enumerate(batch):
        if random.getrandbits(1):
            batch[idx] = np.fliplr(img)
    return batch
def random_flip_updown(batch):
    """Flip each image vertically with probability 0.5 (in place)."""
    for idx, img in enumerate(batch):
        if random.getrandbits(1):
            batch[idx] = np.flipud(img)
    return batch
def data_preprocessing(x_train, x_test):
    """Standardize each channel to zero mean / unit variance, in place.

    Each set is normalized with its own statistics; both (modified) arrays
    are returned.  Inputs are expected to be 4-D (N, H, W, C) float arrays.
    """
    for ch in range(x_train.shape[-1]):
        x_train[:, :, :, ch] = (x_train[:, :, :, ch] - np.mean(x_train[:, :, :, ch])) / np.std(x_train[:, :, :, ch])
        x_test[:, :, :, ch] = (x_test[:, :, :, ch] - np.mean(x_test[:, :, :, ch])) / np.std(x_test[:, :, :, ch])
    return x_train, x_test
def data_augmentation(batch):
    """Augment a batch: random horizontal flip, random vertical flip, then a
    4-pixel-padded random 32x32 crop."""
    augmented = random_flip_leftright(batch)
    augmented = random_flip_updown(augmented)
    return random_crop(augmented, [32, 32], 4)
def next_batch(img, label, batch_size, step):
    """Return (augmented images, labels) for mini-batch number `step`."""
    start = step * batch_size
    stop = start + batch_size
    return data_augmentation(img[start:stop]), label[start:stop]
def shuffle_data(imgData, labData):
    """Shuffle images and labels with a single shared random permutation."""
    order = np.random.permutation(len(imgData))
    return imgData[order], labData[order]
if __name__ == '__main__':
    # Smoke test: load the pickled CT dataset and report the array shapes.
    X, Y, U = load_CT('../DataPKL')
    print(X.shape)
    print(Y.shape)
    print(U.shape)
|
import sys
import numpy as np
import pandas as pd
from scipy import stats
from itertools import permutations
NUM_RANKING = 3
def calculate_weighted_kendall_tau(pred, label, rnk_lst, num_rnk):
    """Mean weighted Kendall tau correlation between predicted and true rankings.

    Args:
        pred: array of permutation indices (into rnk_lst) predicted per sample.
        label: array of ground-truth permutation indices, same length as pred.
        rnk_lst: lookup table of rank permutations, shape (num_perms, num_rnk).
        num_rnk: number of ranked items per sample.

    Returns:
        Average correlation over all samples, or 0.0 for empty input
        (previously this raised ZeroDivisionError).
    """
    if len(label) == 0:
        return 0.0
    total_corr = 0.0
    for p, l in zip(pred, label):
        # scipy's weightedtau weights HIGHER scores more, so ranks are
        # inverted before the call (rank 1 becomes the largest number).
        corr, _ = stats.weightedtau(num_rnk - 1 - rnk_lst[l],
                                    num_rnk - 1 - rnk_lst[p])
        total_corr += corr
    return total_corr / len(label)
# Script entry: argv[1] = ground-truth CSV, argv[2] = predictions CSV.
y_true = pd.read_csv(sys.argv[1], header=None, encoding='utf8').to_numpy()
y_pred = pd.read_csv(sys.argv[2], header=None, encoding='utf8').to_numpy()
# Lookup table of all rank permutations; labels/predictions index into it.
rnk_lst = np.array(list(permutations(np.arange(NUM_RANKING), NUM_RANKING)))
# get scores
score = np.round(calculate_weighted_kendall_tau(y_pred, y_true, rnk_lst, NUM_RANKING), 7)
print("score:", score) |
#!/usr/bin/env python
import os
import re
import sys
# Regexes for CONFIG lines.  Raw strings fix the invalid escape sequences
# ('\s' in a plain string is a DeprecationWarning/SyntaxWarning in Python 3).
comment = re.compile(r'^\s*#')
assign = re.compile(r'^\s*([a-zA-Z_]+)\s*(\?)?=\s*([^#]*)')
# Start from the environment, then let VAR=value command-line args override.
args = os.environ.copy()
for arg in sys.argv:
    m = assign.match(arg)
    if m:
        var = m.group(1).strip()
        val = m.group(3).strip()
        args[var] = val
# special case for DPDK_DIR, which is short for CONFIG_DPDK_DIR
if 'DPDK_DIR' in args and 'CONFIG_DPDK_DIR' not in args:
    args['CONFIG_DPDK_DIR'] = args['DPDK_DIR']
# Emit a C header: one #define/#undef per CONFIG entry, with command-line /
# environment overrides applied.  Print statements were converted to the
# print() function (works on both Python 2 and 3).
with open('CONFIG') as f:
    for line in f:
        line = line.strip()
        if comment.match(line):
            continue
        m = assign.match(line)
        if not m:
            continue
        var = m.group(1).strip()
        default = m.group(3).strip()
        val = args.get(var, default)
        if default.lower() == 'y' or default.lower() == 'n':
            # Boolean option: y -> defined flag, anything else -> undef.
            if val.lower() == 'y':
                print("#define SPDK_{} 1".format(var))
            else:
                print("#undef SPDK_{}".format(var))
        else:
            # String option: escape embedded quotes so the value is a valid
            # C string literal.  BUG FIX: the original used
            # val.replace('"', '\"'), which is a no-op because '\"' == '"';
            # the intended replacement is a backslash-quote pair.
            strval = val.replace('"', '\\"')
            print("#define SPDK_{} \"{}\"".format(var, strval))
|
import io
import json
import urllib
import urllib.request

from json import *

from flask import Flask, jsonify, request
from PIL import Image
import torch
from torchvision import transforms
app = Flask(__name__)
# Evaluation-time preprocessing: upscale, center-crop to the model's 448x448
# input, tensorize, and normalize with ImageNet statistics.
transform_test = transforms.Compose([
    transforms.Resize((600, 600), Image.BILINEAR),
    transforms.CenterCrop((448, 448)),
    # transforms.RandomHorizontalFlip(), # only if train
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
# NTS-Net pretrained on CUB-200, fetched from torch hub at import time.
model = torch.hub.load('nicolalandro/ntsnet-cub200', 'ntsnet', pretrained=True, **{'topN': 6, 'device':'cpu', 'num_classes': 200})
model.eval()
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
def allowed_file(filename):
    """True when `filename` has an extension in ALLOWED_EXTENSIONS
    (case-insensitive, e.g. 'bird.PNG' is accepted)."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def get_prediction(img):
    """Run NTS-Net on a batched image tensor and return a JSON response with
    the predicted class id and the bird's display name."""
    with torch.no_grad():
        top_n_coordinates, concat_out, raw_logits, concat_logits, part_logits, top_n_index, top_n_prob = model(img)
        _, predicted = torch.max(concat_logits, 1)
    pred_id = predicted.item()
    # Class labels look like '001.Black_footed_Albatross'; strip the numeric
    # prefix and replace underscores for display.
    name = model.bird_classes[pred_id][4:].replace("_"," ")
    payload = {'id': pred_id, 'name': name}
    print(payload)
    return jsonify(payload)
@app.route('/predict', methods=['POST'])
def predict():
    """Classify a bird image supplied either as a multipart upload ('file')
    or as a JSON body {'value': <image-url>}.

    Returns the classifier's JSON response, or {'error': ...} when no usable
    input was provided.

    BUG FIX: the original called request.get_json() unconditionally and then
    indexed content['value'], which raised on non-JSON posts (file uploads);
    get_json(silent=True) plus .get() makes both input modes safe.  Also
    replaced 'url == None' with the idiomatic 'url is None'.
    """
    print(request.is_json)
    content = request.get_json(silent=True) or {}
    url = content.get('value')
    print(url)
    file = request.files.get('file')
    if file is None or file.filename == "":
        # No upload: fall back to fetching the image from the given URL.
        if url is None:
            return jsonify({'error': 'no file'})
        img = Image.open(urllib.request.urlopen(url))
    else:
        if not allowed_file(file.filename):
            return jsonify({'error': 'format not supported'})
        img = Image.open(io.BytesIO(file.read()))
    scaled_img = transform_test(img)
    torch_images = scaled_img.unsqueeze(0)  # add the batch dimension
    return get_prediction(img=torch_images)
|
import numpy as np
import tensorflow as tf
import pdb
import itertools
from collections import OrderedDict
from mopo.models.utils import get_required_argument
from mopo.utils.logging import Progress, Silent
import wandb
import os
# Silence TensorFlow's C++ INFO/WARNING logs and Python-side logging noise.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
class AE:
    """ The energy model with sliced score matching. """
    # NOTE(review): despite the name and docstring, the graph built below is
    # a plain fully-connected autoencoder (TF1 static graph) trained with an
    # L2 reconstruction loss; predict() uses the per-sample reconstruction
    # error as an uncertainty penalty.
    def __init__(self, lr,
                obs_dim, act_dim, rew_dim, hidden_dim,
                early_stop_patience, name="SSM"):
        # lr: Adam learning rate; hidden_dim: width of the outer layers;
        # early_stop_patience: epochs without improvement before stopping.
        self.lr = lr
        self._early_stop_patience = early_stop_patience
        self._state = {} # used for storing weight
        self._snapshot = (None, 1e10) # store the best (epoch, val_loss) pair
        self._epochs_since_update = 0
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.rew_dim = rew_dim
        self.hidden_dim = hidden_dim
        self.noise_type = 'radermacher'
        # Dedicated TF1 session owning this model's variables.
        self.sess_ssm = tf.Session(config=tf.ConfigProto())
    def get_data(self):
        """Create input/label placeholders of width obs_dim*2 + act_dim."""
        # create placeholder for the graph
        self.input = tf.placeholder(tf.float32,
                                    shape=[None, self.obs_dim * 2 + self.act_dim], # [batch_size, data_size]
                                    name="inputs_data")
        self.label = tf.placeholder(tf.float32,
                                    shape=[None, self.obs_dim * 2 + self.act_dim],
                                    name='output_data')
    def inference(self):
        """Build the dense encoder/decoder; output width matches the input."""
        # Build model here, predict the energy
        fc1 = tf.layers.dense(self.input,
                              self.hidden_dim,
                              activation=tf.nn.softplus,
                              name='fc1') # Output tensor the same shape as inputs except the last dimension is of size units.
        fc2 = tf.layers.dense(fc1,
                              self.hidden_dim//4,
                              activation=tf.nn.softplus,
                              name='fc2')
        # fc3 = tf.layers.dense(fc2,
        #                       self.hidden_dim//4,
        #                       activation=tf.nn.softplus,
        #                       name='fc3')
        # fc4 = tf.layers.dense(fc3,
        #                       self.hidden_dim//2,
        #                       activation=tf.nn.softplus,
        #                       name='fc4')
        fc5 = tf.layers.dense(fc2,
                              self.hidden_dim,
                              activation=tf.nn.softplus,
                              name='fc5')
        self.pred = tf.layers.dense(fc5,
                                    self.obs_dim * 2 + self.act_dim,
                                    name='prediction') # returned shape should be [ batch_size, obs_dim * 2 + act_dim]
    def loss(self):
        """Define the L2 reconstruction loss tensor.

        NOTE(review): this rebinds self.loss from the method to the loss
        tensor, so loss() is callable exactly once — build() relies on that
        order.
        """
        with tf.variable_scope('loss'):
            # self.loss = tf.reduce_mean(tf.squared_difference(self.pred, self.label), name='l2_loss')
            self.loss = tf.nn.l2_loss(self.pred-self.label, name='l2_loss') # sum of t^2
            # self.loss = tf.reduce_sum(loss)
    def optimize(self):
        """Attach an Adam optimizer minimizing the loss tensor."""
        # setup the optimizer, using Adam
        with tf.variable_scope('optimizer'):
            self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
    def eval(self, val_in, val_label):
        """Run the loss tensor on a validation batch and return its value."""
        with tf.variable_scope('eval'):
            eval_loss = self.sess_ssm.run(
                self.loss,
                feed_dict={
                    self.input: val_in, self.label: val_label
                }
            )
            # l1 = self.sess_ssm.run(
            #     self.loss1,
            #     feed_dict={self.input: val_in}
            # )
            # l2 = self.sess_ssm.run(
            #     self.loss2,
            #     feed_dict={self.input: val_in}
            # )
            # print("during eval, the loss 1 is {}, the loss 2 is {}".format(l1, l2))
        return eval_loss
    def summary(self):
        # TODO
        pass
    def _predict(self):
        """Define the per-sample reconstruction-error tensor (kept as a
        column vector via keep_dims)."""
        with tf.variable_scope('predict'):
            self.reconstruct_error = tf.reduce_sum(tf.squared_difference(self.pred, self.label), axis=-1, keep_dims=True, name='reconstruct_error')
    def predict(self, inputs):
        """Return the per-sample reconstruction error for `inputs`.

        The inputs double as labels, so the error measures how well the
        autoencoder reproduces them; larger values flag unfamiliar samples.
        """
        # inputs = np.tile(inputs[None], [self.num_nets, 1, 1]) # to align the shape
        # return self.sess_ssm.run(self.energy, feed_dict={self.input: inputs})[0]
        with tf.variable_scope('predict'):
            reconsturct_error = self.sess_ssm.run(
                self.reconstruct_error,
                feed_dict={
                    self.input: inputs, self.label:inputs
                }
            )
        return reconsturct_error # use the reconstruction loss as the penalty
    def build(self):
        '''
        Build the computation graph
        '''
        self.get_data()
        self.inference()
        self.loss()
        self.optimize()
        self._predict()
        self.summary()
        # initialize variables
        self.sess_ssm.run(tf.global_variables_initializer())
    def train(self,
              inputs,
              targets,
              batch_size=256,
              max_epochs=100,
              holdout_ratio=0.2,
              max_grad_updates=None,
              max_logging=1000, # TODO: what is this used for?
              hide_progress=False,
              max_t=None):
        """
        @params: inputs: (s, a, s') pair. shape [#buffer, 2*obs_dim+act_dim]
        """
        self._state = {}
        break_train = False # used to break the training once the holdout_losses doesn't improve
        #
        # split into training and holdout sets
        num_holdout = min(int(inputs.shape[0] * holdout_ratio), max_logging)
        permutation = np.random.permutation(inputs.shape[0])
        inputs, holdout_inputs = inputs[permutation[num_holdout:]], inputs[permutation[:num_holdout]]
        targets, holdout_targets = targets[permutation[num_holdout:]], targets[permutation[:num_holdout]]
        # expand the inputs data to fit the shape of nn
        # holdout_inputs = np.tile(holdout_inputs[None], [self.num_nets, 1, 1])
        # print('[ SSM ] Training{} | Holdout: {}'.format(inputs.shape, holdout_inputs.shape))
        idxs = np.arange(inputs.shape[0])
        if hide_progress:
            progress = Silent()
        else:
            progress = Progress(max_epochs)
        grad_updates = 0
        val_loss = 0
        if max_epochs is not None:
            epoch_iter = range(max_epochs)
        else:
            epoch_iter = itertools.count()
        # epoch_iter = range(1)
        for epoch in epoch_iter:
            # One pass over the (shuffled) training indices in mini-batches.
            for batch_num in range(int(np.ceil(idxs.shape[-1] / batch_size))):
                batch_idxs = idxs[batch_num * batch_size:(batch_num + 1) * batch_size]
                self.sess_ssm.run(
                    self.optimizer,
                    feed_dict={self.input: inputs[batch_idxs],
                               self.label: targets[batch_idxs]}
                )
                # print("check vaule when training, logp: {}, grad1: {}, gradv: {}, grad2: {}, loss1:{}, loss2:{}, loss:{}".format(
                #     logp_o, grad1_o, gradv_o, grad2_o, loss1_o, loss2_o, loss_o
                # ))
                grad_updates += 1
            # shuffle data for eval
            # idxs = shuffle_rows(idxs)
            np.random.shuffle(idxs)
            # val and logging
            if not hide_progress:
                model_loss = self.eval(inputs[idxs[:max_logging]], targets[idxs[:max_logging]])
                holdout_loss = self.eval(holdout_inputs, holdout_targets)
                wandb.log({'AE': {'val_loss': holdout_loss, 'train_loss': model_loss}})
                # for printing
                named_losses = [['M', model_loss]]
                named_holdout_losses = [['V', holdout_loss]]
                named_losses = named_losses + named_holdout_losses
                progress.set_description(named_losses)
                break_train = self._save_best(epoch, holdout_loss)
            progress.update()
            # stop training
            if break_train or (max_grad_updates and grad_updates > max_grad_updates):
                break
        val_loss = self.eval(holdout_inputs, holdout_targets)
        print(' [ AE ] Finish training, the validation loss is :', val_loss)
        return OrderedDict({'val_loss': val_loss})
    def _save_best(self, epoch, holdout_loss):
        """
        save the checkpoint and early stop
        The condition of early stopping: (best - current)/current > 0.01 and
        """
        updated = False
        current = holdout_loss
        _, best = self._snapshot
        improvement = (best - current) / best # Notice this is different with the one used in bnn._save_best
        print("improvement {} and updates steps {} and current holdout_loss {}, best loss {}".format(improvement,
                                                                                                     self._epochs_since_update,
                                                                                                     current, best))
        if improvement > 0.01:
            self._snapshot = (epoch, current)
            # save current state
            # saver.save(self.sess_ssm, '')
            updated = True
        # early stopping
        if updated:
            self._epochs_since_update = 0
        else:
            self._epochs_since_update += 1
        # Returns True to request that training stop.
        if self._epochs_since_update > self._early_stop_patience:
            return True
        else:
            return False
|
# File: symanteccas_connector.py
#
# Copyright (c) 2016-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
import json
import ssl
import sys
import phantom.app as phantom
import requests
import websocket
from bs4 import UnicodeDammit
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector
from phantom.vault import Vault
from symanteccas_consts import *
class SymanteccasConnector(BaseConnector):
    def __init__(self):
        # Calling the BaseConnector's init function
        super(SymanteccasConnector, self).__init__()
        self._api_key = None                  # value for the X-API-TOKEN header
        self._url = None                      # REST base URL from asset config
        self._websocket_url = None            # ws:// or wss:// mirror of _url
        self._verify_server_cert = False
        self._timeout = SYMANTECCAS_DEFAULT_TIMEOUT   # websocket recv timeout (secs)
        self._headers = None                  # auth headers for REST + websocket
        self._websocket_conn = None           # open connection set by _create_web_socket
        self._server_request_id = None        # detonation request id to match reports
        return
def _handle_py_ver_compat_for_input_str(self, input_str, always_encode=False):
"""
This method returns the encoded|original string based on the Python version.
:param input_str: Input string to be processed
:return: input_str (Processed input string based on following logic 'input_str - Python 3; encoded input_str - Python 2')
"""
try:
if input_str and (self._python_version < 3 or always_encode):
input_str = UnicodeDammit(input_str).unicode_markup.encode('utf-8')
except:
self.debug_print("Error occurred while handling python 2to3 compatibility for the input string")
return input_str
def initialize(self):
"""
This is an optional function that can be implemented by the AppConnector derived class. Since the configuration
dictionary is already validated by the time this function is called, it's a good place to do any extra
initialization of any internal modules. This function MUST return a value of either phantom.APP_SUCCESS or
phantom.APP_ERROR. If this function returns phantom.APP_ERROR, then AppConnector::handle_action will not get
called.
"""
try:
self._python_version = int(sys.version_info[0])
except:
return self.set_status(phantom.APP_ERROR, "Error occurred while getting the Phantom server's Python major version")
config = self.get_config()
self._api_key = self._handle_py_ver_compat_for_input_str(config[SYMANTECCAS_JSON_API_KEY])
self._url = self._handle_py_ver_compat_for_input_str(config[SYMANTECCAS_JSON_URL].strip('/'))
self._verify_server_cert = config.get(SYMANTECCAS_JSON_VERIFY_SERVER_CERT, False)
self._timeout = config.get(SYMANTECCAS_JSON_TIMEOUT_SECS, SYMANTECCAS_DEFAULT_TIMEOUT)
self._headers = {SYMANTECCAS_X_API_TOKEN: self._api_key}
url_components = self._url.split("://")
if len(url_components) > 1:
protocol = url_components[0]
device_address = url_components[1]
if protocol == 'https':
self._websocket_url = "wss://" + device_address
elif protocol == 'http':
self._websocket_url = "ws://" + device_address
else:
self._websocket_url = self._url
else:
self._websocket_url = self._url
return phantom.APP_SUCCESS
def _get_error_message_from_exception(self, e):
""" This method is used to get appropriate error message from the exception.
:param e: Exception object
:return: error message
"""
try:
if e.args:
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = ERR_CODE_MSG
error_msg = e.args[0]
else:
error_code = ERR_CODE_MSG
error_msg = ERR_MSG_UNAVAILABLE
except:
error_code = ERR_CODE_MSG
error_msg = ERR_MSG_UNAVAILABLE
try:
error_msg = self._handle_py_ver_compat_for_input_str(error_msg)
except TypeError:
error_msg = TYPE_ERR_MSG
except:
error_msg = ERR_MSG_UNAVAILABLE
return error_code, error_msg
    # Function that makes the REST call to the device,
    # generic function that can be called from various action handlers
    def _make_rest_call(self, endpoint, action_result, method="post", files=None):
        """Issue an HTTP request to the appliance and parse the response.

        :param endpoint: path appended to the configured base URL
        :param action_result: ActionResult to record failures on
        :param method: name of a `requests` function ('get', 'post', 'put', ...)
        :param files: optional multipart files dict for uploads
        :return: (phantom status, parsed JSON response or None)
        """
        rest_resp = None
        # Known error status codes mapped to their user-facing messages.
        error_resp_dict = {
            SYMANTECCAS_REST_RESP_RESOURCE_INCORRECT: SYMANTECCAS_REST_RESP_RESOURCE_INCORRECT_MSG,
            SYMANTECCAS_REST_RESP_ACCESS_DENIED: SYMANTECCAS_REST_RESP_ACCESS_DENIED_MSG,
            SYMANTECCAS_REST_RESP_RESOURCE_NOT_FOUND: SYMANTECCAS_REST_RESP_RESOURCE_NOT_FOUND_MSG
        }
        # get or post or put, whatever the caller asked us to use,
        # if not specified the default will be 'post'
        try:
            request_func = getattr(requests, method)
        except:
            self.save_progress(SYMANTECCAS_ERR_API_UNSUPPORTED_METHOD, method=method)
            # set the action_result status to error, the handler function
            # will most probably return as is
            return action_result.set_status(phantom.APP_ERROR, SYMANTECCAS_ERR_API_UNSUPPORTED_METHOD,
                                            method=str(method)), rest_resp
        # Make the call
        try:
            response = request_func(SYMANTECCAS_BASE_URL.format(url=self._url) + endpoint,
                                    headers=self._headers, files=files, verify=self._verify_server_cert)
        except Exception as e:
            error_code, error_msg = self._get_error_message_from_exception(e)
            error_text = "{0}. Error Code:{1}. Error Message:{2}".format(
                SYMANTECCAS_ERR_SERVER_CONNECTION,
                error_code,
                error_msg
            )
            self.debug_print(error_text)
            # set the action_result status to error, the handler function
            # will most probably return as is
            return action_result.set_status(phantom.APP_ERROR, error_text), rest_resp
        if response.status_code in error_resp_dict.keys():
            self.debug_print(SYMANTECCAS_ERR_FROM_SERVER.format(status=response.status_code,
                                                                detail=error_resp_dict[response.status_code]))
            # set the action_result status to error, the handler function
            # will most probably return as is
            return action_result.set_status(phantom.APP_ERROR, SYMANTECCAS_ERR_FROM_SERVER, status=response.status_code,
                                            detail=error_resp_dict[response.status_code]), rest_resp
        # Try parsing the json, even in the case of an HTTP error the data
        # might contain a json of details 'message'
        if response.status_code == SYMANTECCAS_REST_RESP_SUCCESS:
            content_type = response.headers['content-type']
            if content_type.find('json') != -1:
                try:
                    rest_resp = response.json()
                except Exception as e:
                    # response.text is guaranteed to be NON None, it will be empty,
                    # but not None
                    msg_string = SYMANTECCAS_ERR_JSON_PARSE.format(raw_text=response.text)
                    self.debug_print(msg_string)
                    return action_result.set_status(phantom.APP_ERROR, msg_string, e), rest_resp
                # If error in response
                result = rest_resp.get('result', {})
                if result.get(SYMANTECCAS_JSON_RESP_STATUS) == SYMANTECCAS_STATUS_ERROR:
                    error_msg = result.get(SYMANTECCAS_JSON_RESP_ERROR)
                    self.debug_print(SYMANTECCAS_ERR_UNABLE_FULFILL_REQ_WITH_ERROR.format(error=error_msg))
                    return action_result.set_status(phantom.APP_ERROR,
                                                    SYMANTECCAS_ERR_UNABLE_FULFILL_REQ_WITH_ERROR.format(error=error_msg)),\
                        rest_resp
            return phantom.APP_SUCCESS, rest_resp
        # All other response codes from Rest call are failures
        # The HTTP response doesnt return error message in case of unknown error code
        self.debug_print(SYMANTECCAS_ERR_FROM_SERVER.format(status=response.status_code,
                                                            detail=SYMANTECCAS_REST_RESP_OTHER_ERROR_MSG))
        # set the action_result status to error, the handler function
        # will most probably return as is
        return action_result.set_status(phantom.APP_ERROR, SYMANTECCAS_ERR_FROM_SERVER, status=response.status_code,
                                        detail=SYMANTECCAS_REST_RESP_OTHER_ERROR_MSG), rest_resp
    def _test_connectivity(self, param):
        """
        Called when the user depresses the test connectivity button on the Phantom UI.
        Use a basic query to determine if the source IP, port and API key is correct
        Initiate a websocket and start listening to it
        In a normal scenario we do not expect any message over socket and wait until timeout
        If we receive message "Access Denied" or any other exception its a failure
        """
        action_result = ActionResult()
        self.save_progress(SYMANTECCAS_TEST_CONN_LOGIN)
        response = None
        # Querying endpoint to detonate empty file
        self.save_progress(SYMANTECCAS_CONNECTION_TEST_MSG)
        self.save_progress("Configured URL: {}".format(self._url))
        self.save_progress("Configured WebSocket URL: {}".format(self._websocket_url))
        ret_val, json_resp = self._make_rest_call(SYMANTECCAS_DETONATE_FILE_ENDPOINT, action_result)
        # Since no file is uploaded, make rest call should fail with message "No file uploaded"
        if phantom.is_fail(ret_val) and "No file uploaded" not in action_result.get_message():
            self.save_progress(action_result.get_message())
            return action_result.get_status()
        # Querying endpoint to check websocket connection to device
        self.save_progress("Creating websocket connection")
        ret_val = self._create_web_socket(action_result)
        # If websocket connection is not successful
        if phantom.is_fail(ret_val):
            self.save_progress(action_result.get_message())
            return action_result.get_status()
        # Opening websocket connection to listen for traffic
        try:
            # To get response from websocket
            response = self._websocket_conn.recv()
            # If we get Access denied in response, it means unauthorised connection
            if response == SYMANTECCAS_SOCKET_ERR_ACCESS_DENIED:
                self.debug_print(SYMANTECCAS_SOCKET_ERR_ACCESS_DENIED)
                self.save_progress("Access denied. Check the credentials.")
                self.set_status(phantom.APP_ERROR, SYMANTECCAS_CONNECTIVITY_FAIL)
                return action_result.set_status(phantom.APP_ERROR)
        except websocket.WebSocketTimeoutException:
            # Success scenario
            # If we will not get any response until time out, its success.
            if response != SYMANTECCAS_SOCKET_ERR_ACCESS_DENIED:
                self.set_status_save_progress(phantom.APP_SUCCESS, SYMANTECCAS_CONNECTIVITY_SUCC)
                return action_result.set_status(phantom.APP_SUCCESS)
        except Exception as e:
            # In case of any other error scenarios
            self.debug_print(SYMANTECCAS_CONNECTIVITY_FAIL, e)
            self.set_status_save_progress(phantom.APP_ERROR, SYMANTECCAS_CONNECTIVITY_FAIL, e)
            return action_result.set_status(phantom.APP_ERROR)
        # In all other cases return success
        # (a non-"Access Denied" message arrived before the timeout)
        self.set_status_save_progress(phantom.APP_SUCCESS, SYMANTECCAS_CONNECTIVITY_SUCC)
        return action_result.set_status(phantom.APP_SUCCESS)
# This function is used for websocket connection with sandbox
def _create_web_socket(self, action_result):
# Validating if timeout parameter is positive integer
if not (str(self._timeout).isdigit() and int(self._timeout) > 0):
self.debug_print(SYMANTECCAS_JSON_INVALID_TIMEOUT)
self.save_progress(SYMANTECCAS_JSON_INVALID_TIMEOUT)
return action_result.set_status(phantom.APP_ERROR, SYMANTECCAS_JSON_INVALID_TIMEOUT)
# In case of test_asset_connectivity, ignoring the actual timeout as it may be too high.
# Overriding timeout to 5 secs
if phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY:
self.save_progress(SYMANTECCAS_TEST_CONN_OVERRIDE_TIMEOUT)
time_out = SYMANTECCAS_TEST_CONN_TIMEOUT
else:
time_out = self._timeout
try:
# Calling endpoint as per verify server certificate input parameter
# For secure connection using wss, else ws
if self._verify_server_cert:
self._websocket_conn = websocket.create_connection(SYMANTECCAS_TEST_CONN_ENDPOINT.format(
websocket_url=self._websocket_url), header=self._headers, timeout=int(time_out),
sslopt={"ca_certs": self.get_ca_bundle(), "cert_reqs": ssl.CERT_REQUIRED})
else:
self._websocket_conn = websocket.create_connection(SYMANTECCAS_TEST_CONN_ENDPOINT.format(
websocket_url=self._websocket_url), header=self._headers, timeout=int(time_out),
sslopt={"cert_reqs": ssl.CERT_NONE})
except Exception as e:
self.debug_print(SYMANTECCAS_ERR_SERVER_CONNECTION, e)
return action_result.set_status(phantom.APP_ERROR, SYMANTECCAS_ERR_SERVER_CONNECTION, e)
return phantom.APP_SUCCESS
    def _detonate_file(self, param):
        """Upload a vault file for detonation and wait for its report.

        The websocket is opened first so the analysis result (matched by the
        server-generated request id) can be received; the file is then
        posted over REST and the returned score/status are summarized.
        """
        action_result = self.add_action_result(ActionResult(dict(param)))
        summary_data = action_result.update_summary({})
        # Initiating websocket connection
        ret_val = self._create_web_socket(action_result)
        # If websocket connection is unsuccessful
        if phantom.is_fail(ret_val):
            self.set_status(phantom.APP_ERROR, SYMANTECCAS_CONNECTIVITY_FAIL)
            return action_result.get_status()
        # Call for detonate file scan over REST
        return_val, json_resp = self._query_file(param, action_result)
        # If something went wrong
        if phantom.is_fail(return_val):
            return action_result.get_status()
        # If response is None
        if not json_resp:
            self.debug_print(SYMANTECCAS_ERR_UNABLE_FULFILL_REQ)
            return action_result.set_status(phantom.APP_ERROR, SYMANTECCAS_ERR_UNABLE_FULFILL_REQ)
        # Get the server generated request id to match with websocket response id
        result = json_resp.get(SYMANTECCAS_JSON_RESP_RESULT)
        self._server_request_id = result[SYMANTECCAS_JSON_RESP_ID]
        # Wait for websocket response
        status, json_resp = self._wait_for_report(action_result)
        if phantom.is_fail(status):
            return action_result.get_status()
        # Add score and status in summary (a score of 0 is still reported)
        if json_resp.get(SYMANTECCAS_JSON_RESP_SCORE) or json_resp.get(SYMANTECCAS_JSON_RESP_SCORE) == 0:
            summary_data["global_score"] = json_resp[SYMANTECCAS_JSON_RESP_SCORE]
        if json_resp.get(SYMANTECCAS_JSON_RESP_STATUS):
            summary_data["global_status"] = json_resp[SYMANTECCAS_JSON_RESP_STATUS]
        action_result.add_data(json_resp)
        action_result.set_status(phantom.APP_SUCCESS)
        return action_result.get_status()
def _query_file(self, param, action_result):
    """Upload the vault file to the detonate-file REST endpoint.

    :param param: action parameters (must contain the vault id)
    :param action_result: ActionResult on which error status is set
    :return: (status, json_response) from the REST call, or
             (APP_ERROR status, None) when the vault id cannot be resolved
    """
    # Mandatory input parameter
    vault_id = self._handle_py_ver_compat_for_input_str(param[SYMANTECCAS_JSON_VAULT_ID])
    file_obj = None
    try:
        file_obj = open(Vault.get_file_path(vault_id), 'rb')
        filename = (Vault.get_file_info(vault_id=vault_id, file_name=None, container_id=None)[0])['name']
    except Exception:
        # Fix: was a bare `except:`; also close the handle if get_file_info failed
        # after the open succeeded (the original leaked it).
        if file_obj is not None:
            file_obj.close()
        self.debug_print(SYMANTECCAS_UNKNOWN_VAULT_ID.format(vault_id=vault_id))
        return action_result.set_status(phantom.APP_ERROR,
                                        SYMANTECCAS_UNKNOWN_VAULT_ID.format(vault_id=vault_id)), None
    try:
        # Optional input parameter: caller may override the uploaded file name.
        file_name = param.get(SYMANTECCAS_JSON_FILE_NAME, filename)
        files = {filename: (file_name, file_obj)}
        return self._make_rest_call(SYMANTECCAS_DETONATE_FILE_ENDPOINT, action_result, files=files)
    finally:
        # The REST call reads the file during the request, so it is safe
        # (and necessary) to close the handle once the call returns.
        file_obj.close()
def _wait_for_report(self, action_result):
    """
    This function is used to wait for response from websocket. We will compare server generated id with id in
    received response If both ids are matched we will return json response.

    :param action_result: ActionResult on which error status is set
    :return: (APP_SUCCESS, report dict) on a matching complete report, or
             (APP_ERROR status, None) on timeout/error/no matching response
    """
    # Poll for 10 times if response is not received
    for polling_attempt in range(SYMANTECCAS_STATUS_MAX_POLLING_COUNT):
        try:
            # Wait for websocket response (blocks up to the socket timeout)
            response = self._websocket_conn.recv()
        except websocket._exceptions.WebSocketTimeoutException as wse:
            self.debug_print(SYMANTECCAS_ERR_WEBSOCKET_TIMEOUT)
            return action_result.set_status(phantom.APP_ERROR, SYMANTECCAS_ERR_WEBSOCKET_TIMEOUT, wse), None
        except Exception as e:
            self.debug_print(SYMANTECCAS_ERR_WEBSOCKET, e)
            return action_result.set_status(phantom.APP_ERROR, SYMANTECCAS_ERR_WEBSOCKET, e), None
        # If we get response before timeout
        if response:
            try:
                # Convert string type response in json
                json_resp = json.loads(response)
            except Exception as e:
                self.debug_print(SYMANTECCAS_ERR_WAITING_REPORT.format(raw_text=response))
                return action_result.set_status(phantom.APP_ERROR, SYMANTECCAS_ERR_WAITING_REPORT.format(
                    raw_text=response), e), None
            # Check if received response is complete(status 1) or partially complete(status 3) and check if server
            # generated request id is matched or not with current response.
            # A non-matching or still-pending message is silently dropped and
            # we poll again on the next loop iteration.
            if json_resp.get(SYMANTECCAS_JSON_RESP_STATUS) in \
                    [SYMANTECCAS_STATUS_COMPLETE, SYMANTECCAS_STATUS_COMPLETE_WITH_ERROR] \
                    and json_resp[SYMANTECCAS_JSON_RESP_ID] == self._server_request_id:
                return phantom.APP_SUCCESS, json_resp
    # All polling attempts exhausted without a matching, complete report.
    self.debug_print(SYMANTECCAS_ERR_WEBSOCKET_NO_RESPONSE)
    return action_result.set_status(phantom.APP_ERROR, SYMANTECCAS_ERR_WEBSOCKET_NO_RESPONSE), None
def finalize(self):
    """Cleanup hook invoked once all handle_action calls are done.

    The BaseConnector calls this after looping over every parameter
    dictionary; it is the place for summaries and teardown. Here its only
    job is to close the websocket connection if one was opened.
    """
    conn = self._websocket_conn
    if conn:
        conn.close()
def handle_action(self, param):
    """Dispatch one action to its handler.

    Called for every param dictionary element in the parameters array.
    Looks up the current action identifier in a dispatch table and invokes
    the matching member function; the handler populates action results on
    the connector, so the return value is mostly ignored by BaseConnector.

    :param param: parameter dictionary for this action run
    :raises ValueError: if the action identifier is not supported
    """
    # Supported actions by app (dispatch table instead of an if/elif chain)
    supported_actions = {
        'test_asset_connectivity': self._test_connectivity,
        'detonate_file': self._detonate_file,
    }
    action = self.get_action_identifier()
    try:
        run_action = supported_actions[action]
    except KeyError:
        # Fix: was a bare `except:`, which would also mask unrelated errors
        # (e.g. KeyboardInterrupt); only a missing key means "unsupported".
        raise ValueError('action %r is not supported' % action)
    return run_action(param)
if __name__ == '__main__':
    # Standalone debug harness: run the connector against a test JSON file.
    # NOTE(review): pudb is imported unconditionally and set_trace() always
    # fires — this is the usual phantom-app debug boilerplate, but it makes
    # the script require pudb; confirm that is intended.
    import pudb
    pudb.set_trace()
    if len(sys.argv) < 2:
        print('No test json specified as input')
        sys.exit(0)
    # Load the action-run JSON supplied on the command line.
    with open(sys.argv[1]) as f:
        in_json = f.read()
    in_json = json.loads(in_json)
    print(json.dumps(in_json, indent=4))
    connector = SymanteccasConnector()
    connector.print_progress_message = True
    # _handle_action expects the serialized JSON and an action-run handle.
    ret_val = connector._handle_action(json.dumps(in_json), None)
    print(json.dumps(json.loads(ret_val), indent=4))
    sys.exit(0)
|
class SnakeUtils:
    """Debug helpers for inspecting a snake game payload."""

    def __init__(self, data):
        # `data` is the parsed move-request payload; this class only reads
        # data.you.head (x/y) and data.you.body (list of x/y pieces).
        self.data = data

    def determine_state(self):
        """Print the head position, then each non-head body piece position."""
        print("Head..")
        print("X: {}".format(self.data.you.head.x))
        print("Y: {}".format(self.data.you.head.y))
        print("Body piece positions..")
        for idx, piece in enumerate(self.data.you.body):
            if idx == 0:
                continue  # index 0 is the head, already printed above
            print("{} - X: {}".format(idx, piece.x))
            print("{} - Y: {}".format(idx, piece.y))
|
# Generated by Django 3.2.7 on 2021-10-05 15:19
import json
from django.db import migrations
def update_fields_with_images(apps, schema_editor):
    """Forward migration: wrap legacy plain-text ``comments`` values in the
    JSON envelope ``{"files": {}, "text": <old value>}``.

    Values that already parse as JSON are left untouched.
    """
    # We can't import the Convention model directly as it may be a newer
    # version than this migration expects. We use the historical version.
    # (The original comment said "Person" — the model used here is Convention.)
    Convention = apps.get_model("conventions", "Convention")
    for convention in Convention.objects.all():
        field = convention.comments
        if field is None or field == "":
            continue  # nothing to migrate
        try:
            json.loads(field)
        except json.decoder.JSONDecodeError:
            # Plain text: wrap it in the new JSON envelope.
            convention.comments = json.dumps(
                {
                    "files": {},
                    "text": field if isinstance(field, str) else "",
                }
            )
            # Only hit the database when the value actually changed
            # (the original saved every non-empty row, including unchanged ones).
            convention.save()
def rollback_field_with_images(apps, schema_editor):
    """Reverse migration: unwrap the JSON envelope back to the plain-text
    ``text`` value (empty string when the envelope has no ``text`` key)."""
    Convention = apps.get_model("conventions", "Convention")
    for convention in Convention.objects.all():
        field = convention.comments
        if field is None or field == "":
            continue  # nothing to roll back
        try:
            json_field = json.loads(field)
        except json.decoder.JSONDecodeError:
            # Not JSON: report it and leave the row untouched
            # (the original still called save() on the unchanged row).
            print(f"json error : {field}")
        else:
            convention.comments = json_field["text"] if "text" in json_field else ""
            convention.save()
class Migration(migrations.Migration):
    """Data migration: convert Convention.comments between plain text and
    the JSON envelope used by the image-upload feature (reversible)."""

    dependencies = [
        ("conventions", "0010_alter_pret_preteur"),
    ]

    operations = [
        # RunPython(forward, reverse) makes this data migration reversible.
        migrations.RunPython(update_fields_with_images, rollback_field_with_images),
    ]
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.kaldi.extractors.concat_ext import ConcatFrontExtractor
from openvino.tools.mo.ops.convolution import Convolution
from openvino.tools.mo.ops.op import Op
from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest
class ConcatFrontExtractorTest(KaldiFrontExtractorTest):
    """Unit test for the Kaldi Concat front extractor."""

    @classmethod
    def register_op(cls):
        # Register a stand-in op class under the 'Concat' name so extraction
        # can resolve it; the concrete Op subclass used here is irrelevant
        # to what this test checks.
        Op.registered_ops['Concat'] = Convolution

    def test_concat(self):
        ConcatFrontExtractor.extract(self.test_node)
        # The extractor must set the concatenation axis to 1.
        self.assertEqual(self.test_node.axis, 1)
|
#!/usr/bin/python
import argparse, sys, os, random, re
from shutil import rmtree, copytree
# This will operate a pipeline to go from
# Pre:
# pacbio_raw .. the path to the bax.h5 or bas.h5 you want to process
# please keep in mind that in this requires the accordingly
# named xml file to be in the parent directory as the results
# are stored by pacbio.
# output directory must not already exist.
# Post:
# output direcotry by default is in the current working directory
# and called pre-IDP_output/ but can be anything by settting --output
#
def main():
    """Pipeline step 3: combine step-1 and step-2 outputs into IDP inputs.

    Swaps LSC-corrected reads for full-length originals when similar,
    builds the non-redundant long-read fasta, and writes everything
    (plus a LOG) into a fresh output directory.
    """
    parser = argparse.ArgumentParser(description="take step 1 and step 2 outputs and make inputs suitable for IDP",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--step_1_folder',required=True,help='FOLDERNAME output of step one')
    parser.add_argument('--step_2_folder',required=True,help='FOLDERNAME output of step two')
    parser.add_argument('--lsc_replacement_threshold',type=float,default=0.9,help='Replace corrected with full length corrected when they are this fraction of the full length')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--tempdir',default='/tmp',help='FOLDERNAME location of random temporary directory')
    group.add_argument('--specific_tempdir',help='FOLDERNAME location of specific temporary directory. Will not remove during cleanup.')
    parser.add_argument('--output',default='output_pre-IDP_step-3_final',help='FOLDERNAME of output must not already exist')
    parser.add_argument('--save_tempdir',help='DIRECTORYNAME name of a directory to be created that has the temporary folder in it.')
    args = parser.parse_args()
    # Refuse to clobber existing output locations.
    if os.path.isdir(args.output):
        sys.stderr.write("ERROR "+args.output+" output folder must not already exist.\n")
        sys.exit()
    if args.save_tempdir:
        if os.path.isdir(args.save_tempdir):
            sys.stderr.write("ERROR "+args.save_tempdir+" folder must not already exist.\n")
            sys.exit()
    tdir = setup_temporary_directory(args)
    sys.stderr.write("working in "+tdir+"\n")
    if not os.path.exists(tdir+'/output'):
        os.makedirs(tdir+'/output')
    of_log = open(tdir+'/output/LOG','w')
    of_log.write(str(sys.argv)+"\n")
    sys.stderr.write("Replace LSC corrected with full when lengths are similar.\n")
    [cortot,correp] = execute_replacement(tdir,args.step_2_folder,args.lsc_replacement_threshold)
    of_log.write("LSC replacement threshold: "+str(args.lsc_replacement_threshold)+"\n")
    sys.stderr.write("Replaced "+str(correp)+ " of "+ str(cortot)+" where corrected length was similar to corrected full length\n")
    sys.stderr.write("Compile the non-redundant set of corrected and uncorrected long reads\n")
    [zhq,zlq,zsub] = make_nonredundant(tdir+'/output/lr_nonredundant.fa',args.step_1_folder.rstrip('/')+'/ccs_hq/ccs_hq.fa',args.step_2_folder.rstrip('/')+'/full_LR.fa',args.step_1_folder.rstrip('/')+'/lr_nonredundant_uncorrected.fa')
    sys.stderr.write(str(zhq)+" high quality ccs reads\n")
    sys.stderr.write(str(zlq)+" lsc corrected reads\n")
    sys.stderr.write(str(zsub)+" longest subreads\n")
    of_log.write(str(zhq)+" high quality ccs reads\n")
    of_log.write(str(zlq)+" lsc corrected reads\n")
    of_log.write(str(zsub)+" longest subreads\n")
    # make_isoform returns None; the original bound it to an unused `added`.
    make_isoform(tdir+'/output/lr_for_isoforms.fa',tdir+'/output/lr_nonredundant.fa',tdir+'/swapped_corrected.fa')
    sys.stderr.write("Added "+str(cortot-correp)+" full length lsc corrected sequences for isoform prediction fasta\n")
    of_log.write("Added "+str(cortot-correp)+" full length lsc corrected sequences for isoform prediction fasta\n")
    of_log.close()
    # Publish results, optionally archive the temp dir, then clean up.
    copytree(tdir+'/output',args.output)
    if args.save_tempdir:
        copytree(tdir,args.save_tempdir)
    if not args.specific_tempdir:
        rmtree(tdir)
def make_isoform(output_fasta, nr_fasta, swapped_fasta):
    """Concatenate the non-redundant fasta and the swapped-corrected fasta
    into the fasta used for isoform prediction."""
    with open(output_fasta, 'w') as out_handle:
        for source_path in (nr_fasta, swapped_fasta):
            with open(source_path) as in_handle:
                out_handle.write(in_handle.read())
    return
def make_nonredundant(output_fasta, hq_fasta, lq_corrected_fasta, subread_fasta):
    """Write a non-redundant long-read fasta.

    Priority per molecule (short name): high-quality CCS read, then LSC
    corrected read, then the single longest subread.
    Returns [hq count, lq count, subread count] written.
    """
    hq_seqs = read_fasta_into_hash(hq_fasta)
    lq_seqs = read_fasta_into_hash(lq_corrected_fasta)
    sub_seqs = read_fasta_into_hash(subread_fasta)
    used = set()
    count_hq = 0
    count_lq = 0
    count_sub = 0
    out_handle = open(output_fasta, 'w')
    # 1) All high-quality CCS reads go in unconditionally.
    for full_name, seq in hq_seqs.items():
        used.add(get_short_name(full_name))
        count_hq += 1
        out_handle.write(">" + full_name + "\n" + seq + "\n")
    # 2) LSC corrected reads for molecules not already covered.
    for full_name, seq in lq_seqs.items():
        key = get_short_name(full_name)
        if key in used:
            continue
        used.add(key)
        count_lq += 1
        out_handle.write(">" + full_name + "\n" + seq + "\n")
    # 3) For remaining molecules keep only the longest subread
    #    (strictly longer wins, matching the original tie behavior).
    longest = {}
    for full_name, seq in sub_seqs.items():
        key = get_short_name(full_name)
        if key in used:
            continue
        if key not in longest:
            longest[key] = (0, '', '')
        if len(seq) > longest[key][0]:
            longest[key] = (len(seq), full_name, seq)
    for _, full_name, seq in longest.values():
        count_sub += 1
        out_handle.write(">" + full_name + "\n" + seq + "\n")
    out_handle.close()
    return [count_hq, count_lq, count_sub]
def get_short_name(name):
    """Return the molecule identifier prefix of a read name.

    PacBio read names look like '<movie>/<holenumber>/...'; the short name
    is '<movie>/<holenumber>'. Exits with an error for unparseable names.
    """
    # Fix: use a raw string for the regex ('\d' / '\/' in a plain string
    # emit invalid-escape warnings on modern Python).
    m = re.match(r'^([^/]+/\d+)', name)
    if m:
        return m.group(1)
    sys.stderr.write("ERROR strange gene name "+name+"\n")
    sys.exit()
def execute_replacement(tdir, lsc_dir, thresh):
    """Swap LSC-corrected reads for their full-length originals when the
    corrected read reaches at least `thresh` of the full length.

    Writes `<tdir>/swapped_corrected.fa` (one entry per corrected read, with
    near-full-length ones replaced by the full sequence) and
    `<tdir>/not_swapped_full.fa` (full sequences of the reads NOT swapped).
    Returns [total corrected reads, number swapped].
    """
    full = read_fasta_into_hash(lsc_dir.rstrip('/')+'/full_LR.fa')
    corrected = read_fasta_into_hash(lsc_dir.rstrip('/')+'/corrected_LR.fa')
    # put full back into corrected when lengths are similar
    of_not_swap_full = open(tdir+'/not_swapped_full.fa','w')
    of_swap_corrected = open(tdir+'/swapped_corrected.fa','w')
    z = 0
    zswap = 0
    for name in corrected:
        z += 1
        # Corrected names may carry a "|<fraction>" suffix; strip it so the
        # name matches the full-length fasta. (Raw string fixes the invalid
        # escape sequences the original non-raw regex produced.)
        short_name = name
        m = re.match(r'^(.*)\|[\d\.]+$', name)
        if m:
            short_name = m.group(1)
        if short_name not in full:
            sys.stderr.write("ERROR: " + name + " not in full")
            sys.exit()
        # (Removed the original's unused `full_len`/`corrected_len` locals.)
        if len(corrected[name]) > len(full[short_name]):
            sys.stderr.write("WARNING: length of corrected greater than length of full")
        if len(full[short_name])*thresh <= len(corrected[name]):
            # Corrected read is nearly full length: emit the full sequence instead.
            of_swap_corrected.write('>'+name+"\n"+full[short_name]+"\n")
            zswap += 1
        else:
            of_swap_corrected.write('>'+name+"\n"+corrected[name]+"\n")
            of_not_swap_full.write('>'+short_name+"\n"+full[short_name]+"\n")
    of_not_swap_full.close()
    of_swap_corrected.close()
    return [z, zswap]
def setup_temporary_directory(args):
    """Return the working temp directory, creating it if needed.

    With --specific_tempdir the given path is used (and created when
    missing); otherwise a random `preidp.<N>` subdirectory is created
    under --tempdir. Exits with an error on an invalid/uncreatable dir.
    """
    if args.specific_tempdir:
        tdir = args.specific_tempdir.rstrip('/')
        # Bug fix: the original checked os.path.isdir(args.tempdir) here,
        # so the requested specific temp dir was never created when the
        # (unrelated) default tempdir already existed.
        if not os.path.isdir(tdir):
            os.makedirs(tdir)
        return tdir
    if not os.path.isdir(args.tempdir):
        sys.stderr.write("ERROR invalid temporary directory "+args.tempdir+"\n")
        sys.exit()
    tdir = args.tempdir.rstrip('/')+'/'+'preidp.'+str(random.randint(1,10000000))
    os.makedirs(tdir)
    if not os.path.isdir(tdir):
        sys.stderr.write("ERROR failed to make working temp directory "+args.tempdir+"\n")
        sys.exit()
    return tdir
# pre: A fasta file name
# post: A dictionary of sequences keyed by name
# modifies: none
def read_fasta_into_hash(fasta_filename):
    """Load a FASTA file into a dict mapping header line (without '>') to
    its concatenated sequence. Records with an empty sequence are skipped."""
    seqs = {}
    header = ''
    pieces = []
    header_pattern = re.compile('^>(.+)')
    with open(fasta_filename) as handle:
        for raw_line in handle:
            raw_line = raw_line.rstrip()
            hit = header_pattern.match(raw_line)
            if hit:
                joined = ''.join(pieces)
                if joined != '':
                    seqs[header] = joined
                pieces = []
                header = hit.group(1)
            else:
                pieces.append(raw_line)
    joined = ''.join(pieces)
    if joined != '':
        seqs[header] = joined
    return seqs
if __name__=="__main__":
    # Script entry point.
    main()
|
# We need to use the low-level library to interact with SageMaker since the SageMaker API
# is not available natively through Lambda.
import boto3
import json
# The SageMaker runtime is what allows us to invoke the endpoint that we've created.
from typing import Dict
runtime = boto3.Session().client('sagemaker-runtime')
def lambda_handler(event: Dict, context) -> Dict:
    """AWS Lambda entry point: forward a movie review to the SageMaker
    endpoint for sentiment inference and return the raw result.

    :param event: API Gateway proxy event; its 'body' is a JSON string
                  containing a 'review' field
    :param context: Lambda context (unused)
    :return: HTTP-style response dict with the inference result as body
    """
    body = json.loads(event.get('body'))
    print(f'Request body: {body}')
    review = body['review']  # JSON data in event
    print(f'Request review: {review}')
    # Now we use the SageMaker runtime to invoke our endpoint, sending the review we were given.
    # Bug fix: the original used review.encode('utf-u') — 'utf-u' is not a
    # codec and raises LookupError on every invocation; 'utf-8' is intended.
    response = runtime.invoke_endpoint(EndpointName='pytorch-inference-2021-05-30-16-16-32-577',  # The name of the endpoint we created
                                       ContentType='text/plain',  # The data format that is expected
                                       Body=review.encode('utf-8'))  # The actual review
    # The response is an HTTP response whose body contains the result of our inference
    result = response['Body'].read().decode('utf-8')
    print(f'Response body: {result}')
    return {
        'statusCode': 200,
        'headers': {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},
        'body': result
    }
|
# create_tables.py
"""Drop and recreate all application tables in the local Postgres instance."""
import psycopg2

# Create database connection. Without a connection nothing below can work,
# so fail hard instead of continuing (the original bare `except` printed a
# message and then crashed on an undefined `conn`).
try:
    conn = psycopg2.connect(database="postgres", user="postgres", password="password", host="127.0.0.1", port="5432")
except psycopg2.Error:
    raise SystemExit("I am unable to connect to the database")
conn.autocommit = True

# Create a cursor
cur = conn.cursor()

# (table name, column DDL) pairs; each table is dropped and recreated below.
tables = [
    ("tweets", """
        id SERIAL,
        created_at Date,
        positive INTEGER,
        negative INTEGER,
        neutral INTEGER
    """),
    ("tweets_location", """
        id SERIAL,
        created_at Date,
        location TEXT,
        coordinates TEXT,
        geo FLOAT,
        place TEXT
    """),
    ("recommendations", """
        id SERIAL,
        created_at Date,
        recommendation TEXT,
        price DOUBLE PRECISION
    """),
]

# The price tables all share the same schema; the original repeated the
# identical DDL six times.
price_schema = """
    id SERIAL,
    created_at Date,
    price DOUBLE PRECISION
"""
for table_name in ("bt_price", "btc_price_prediction", "btc_price_prediction_rf",
                   "btc_price_prediction_ses", "btc_price_prediction_varmax",
                   "btc_price_prediction_overall"):
    tables.append((table_name, price_schema))

# Table names come from the static list above, not user input, so simple
# formatting is safe here.
for table_name, schema in tables:
    cur.execute("DROP TABLE IF EXISTS {};".format(table_name))
    cur.execute("CREATE TABLE {} ({})".format(table_name, schema))

# Commit the changes (autocommit is on, so this is a no-op kept for clarity)
conn.commit()
# Close the cursor before the connection (the original closed the
# connection first, which works but is backwards).
cur.close()
conn.close()
|
from flask import Flask, render_template, request
import webbrowser, json
import runner as r
import normalize as n
import time
app = Flask(__name__)
@app.route('/',methods=['POST','GET'])
def main():
    # Serve the single-page front end.
    return render_template('index.html')
@app.route('/url',methods=['POST','GET'])
def url():
    """Accept a JSON body with a 'text' link, run the analysis pipeline on
    it, and return the normalized rating as JSON."""
    if request.method == 'POST':
        resp_json = request.get_json()
        f = resp_json['text']  # link to analyse
        print(f)
        # NOTE(review): purpose of this fixed 2s delay is not visible here
        # (rate limiting? waiting on the scraper?) — confirm it is needed.
        time.sleep(2)
        r.parse2(f)
        new_rating = n.get_nr()
        return json.dumps({"response": new_rating}), 200
    # Bug fix: the route accepts GET but the original returned None for it,
    # which makes Flask raise a 500; answer explicitly instead.
    return json.dumps({"error": "POST required"}), 405
if __name__ == '__main__':
    # Optionally open the UI in a browser when running locally.
    #webbrowser.open('http://127.0.0.1:5000')
    app.run(debug=False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.