# modules
# dash-related libraries
import dash
from dash.dependencies import Output, Event
from math import log10, floor, isnan
from datetime import datetime
from random import randint
import dash_core_components as dcc
import dash_html_components as html
import colorama
import sys
import getopt
# non-dash-related libraries
import plotly.graph_objs as go
import pandas as pd
import cbpro
import numpy as np
# modules added by contributors
import time
import threading
from queue import Queue
# custom library
from gdax_book import GDaxBook
colorama.init()
# creating variables to facilitate later parameterization
debugLevel = 3
debugLevels = ["Special Debug","Debug","Info","Warnings","Errors"]
debugColors = ['\033[34m','\033[90m','\033[32m','\033[33;1m','\033[31m']
serverPort = 8050
clientRefresh = 1
desiredPairRefresh = 10000 # (in ms) lower values refresh at least some pairs faster; higher values reduce CPU load
# js_extern = "https://cdn.rawgit.com/pmaji/crypto-whale-watching-app/master/main.js" # no longer used: external links can be hijacked, so all JS files are now loaded from the local source
noDouble = True # if activated, each order that is part of a ladder is shown only once (as a bubble, not also as a ladder)
SYMBOLS = {"USD": "$", "BTC": "₿", "EUR": "€", "GBP": "£"} # used for the tooltip
SIGNIFICANT = {"USD": 2, "BTC": 5, "EUR": 2, "GBP": 2} # used for rounding
TBL_PRICE = 'price'
TBL_VOLUME = 'volume'
tables = {}
depth_ask = {}
depth_bid = {}
marketPrice = {}
prepared = {}
shape_bid = {}
shape_ask = {}
timeStampsGet = {} # For storing timestamp of Data Refresh
timeStamps = {} # For storing timestamp from calc start at calc end
sendCache = {}
first_prepare = True
first_pull = True
overallNewData = False
class Exchange:
    client = ""
    def __init__(self, pName, pTicker, pStamp):
        self.name = pName
        self.ticker = list(pTicker)  # per-instance list instead of a shared mutable class attribute
        self.millis = pStamp
class Pair:
# Class to store a pair with its respective threads
def __init__(self, pExchange, pTicker):
self.ob_Inst = {}
self.threadWebsocket = {}
self.threadPrepare = {}
self.threadRecalc = {}
self.Dataprepared = False
self.webSocketKill = 1
self.lastStamp = 0
self.usedStamp = 0
self.newData = False
self.name = pExchange + " " + pTicker
self.ticker = pTicker
self.lastUpdate = "0"
self.exchange = pExchange
self.prepare = False
self.websocket = False
self.combined = pExchange + pTicker
def log(pLevel, pMessage):
if pLevel >= debugLevel:
text = (str(datetime.now()) + " [" +
debugLevels[pLevel] +
"]: " + str(pMessage))
open("log.txt","a").write(text + "\n")
print(debugColors[pLevel] + text + '\033[0m')
def get_ticker_list():
with open("trading_pairs.txt") as f:
the_list = sorted(word.strip(",") for line in f for word in line.split())
log(2, the_list)
return the_list
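# Illustrative example: a trading_pairs.txt line such as "ETH-USD, BTC-USD" is split on
# whitespace, trailing commas are stripped, and the sorted result is ['BTC-USD', 'ETH-USD'].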
PAIRS = [] # Array containing all pairs
E_GDAX = Exchange("GDAX", get_ticker_list(),0) # get tickers from trading_pairs.txt file.
for ticker in E_GDAX.ticker:
cObj = Pair(E_GDAX.name, ticker)
PAIRS.append(cObj)
# creates a cache to speed up load time and facilitate refreshes
def get_data_cache(ticker):
return tables[ticker]
def get_All_data():
return prepared
def getSendCache():
return sendCache
def calc_data(pair, range=0.05, maxSize=32, minVolumePerc=0.01, ob_points=60):
global tables, timeStamps, shape_bid, shape_ask, E_GDAX, marketPrice, timeStampsGet
# function to get data from GDAX to be referenced in our call-back later
# ticker a string to particular Ticker (e.g. ETH-USD)
# range is the deviation visible from current price
# maxSize is a parameter to limit the maximum size of the bubbles in the viz
# minVolumePerc is used to set the minimum volume needed for a price-point to be included in the viz
ticker = pair.ticker
exchange = pair.exchange
combined = exchange + ticker
if pair.exchange == E_GDAX.name:
# order_book = gdax.PublicClient().get_product_order_book(ticker, level=3)
order_book = pair.ob_Inst.get_current_book()
pair.usedStamp = getStamp()
ask_tbl = pd.DataFrame(data=order_book['asks'], columns=[
TBL_PRICE, TBL_VOLUME, 'address'])
bid_tbl = pd.DataFrame(data=order_book['bids'], columns=[
TBL_PRICE, TBL_VOLUME, 'address'])
timeStampsGet[pair.combined] = datetime.now().strftime("%H:%M:%S") # save timestamp at data pull time
# Determine what currencies we're working with to make the tool tip more dynamic.
currency = ticker.split("-")[0]
base_currency = ticker.split("-")[1]
sig_use = SIGNIFICANT.get(base_currency.upper(), 2)
symbol = SYMBOLS.get(base_currency.upper(), "")
try:
first_ask = float(ask_tbl.iloc[1, 0])
except (IndexError):
log(4,"Empty data for " + combined + " Will wait 3s")
time.sleep(3)
return False
# prepare Price
ask_tbl[TBL_PRICE] = pd.to_numeric(ask_tbl[TBL_PRICE])
bid_tbl[TBL_PRICE] = pd.to_numeric(bid_tbl[TBL_PRICE])
# data from websocket are not sorted yet
ask_tbl = ask_tbl.sort_values(by=TBL_PRICE, ascending=True)
bid_tbl = bid_tbl.sort_values(by=TBL_PRICE, ascending=False)
    # get the reference ask price (second row of the sorted asks) used to bound both sides
first_ask = float(ask_tbl.iloc[1, 0])
# get perc for ask/ bid
perc_above_first_ask = ((1.0 + range) * first_ask)
perc_above_first_bid = ((1.0 - range) * first_ask)
# limits the size of the table so that we only look at orders 5% above and under market price
ask_tbl = ask_tbl[(ask_tbl[TBL_PRICE] <= perc_above_first_ask)]
bid_tbl = bid_tbl[(bid_tbl[TBL_PRICE] >= perc_above_first_bid)]
# changing this position after first filter makes calc faster
bid_tbl[TBL_VOLUME] = pd.to_numeric(bid_tbl[TBL_VOLUME])
ask_tbl[TBL_VOLUME] = pd.to_numeric(ask_tbl[TBL_VOLUME])
# prepare everything for depchart
ob_step = (perc_above_first_ask - first_ask) / ob_points
ob_ask = pd.DataFrame(columns=[TBL_PRICE, TBL_VOLUME, 'address', 'text'])
ob_bid = pd.DataFrame(columns=[TBL_PRICE, TBL_VOLUME, 'address', 'text'])
    # The following loop builds 'ob_bid' and 'ob_ask', holding the cumulative volume and address count from the current price out to each target price
i = 1
last_ask = first_ask
last_bid = first_ask
current_ask_volume = 0
current_bid_volume = 0
current_ask_adresses = 0
current_bid_adresses = 0
while i < ob_points:
# Get Borders for ask/ bid
current_ask_border = first_ask + (i * ob_step)
current_bid_border = first_ask - (i * ob_step)
# Get Volume
current_ask_volume += ask_tbl.loc[
(ask_tbl[TBL_PRICE] >= last_ask) & (ask_tbl[TBL_PRICE] < current_ask_border), TBL_VOLUME].sum()
current_bid_volume += bid_tbl.loc[
(bid_tbl[TBL_PRICE] <= last_bid) & (bid_tbl[TBL_PRICE] > current_bid_border), TBL_VOLUME].sum()
        # Get Addresses
current_ask_adresses += ask_tbl.loc[
(ask_tbl[TBL_PRICE] >= last_ask) & (ask_tbl[TBL_PRICE] < current_ask_border), 'address'].count()
current_bid_adresses += bid_tbl.loc[
(bid_tbl[TBL_PRICE] <= last_bid) & (bid_tbl[TBL_PRICE] > current_bid_border), 'address'].count()
# Prepare Text
ask_text = (str(round_sig(current_ask_volume, 3, 0, sig_use)) + currency + " (from " + str(current_ask_adresses) +
" orders) up to " + str(round_sig(current_ask_border, 3, 0, sig_use)) + symbol)
bid_text = (str(round_sig(current_bid_volume, 3, 0, sig_use)) + currency + " (from " + str(current_bid_adresses) +
" orders) down to " + str(round_sig(current_bid_border, 3, 0, sig_use)) + symbol)
# Save Data
ob_ask.loc[i - 1] = [current_ask_border, current_ask_volume, current_ask_adresses, ask_text]
ob_bid.loc[i - 1] = [current_bid_border, current_bid_volume, current_bid_adresses, bid_text]
i += 1
last_ask = current_ask_border
last_bid = current_bid_border
# Get Market Price
try:
mp = round_sig((ask_tbl[TBL_PRICE].iloc[0] +
bid_tbl[TBL_PRICE].iloc[0]) / 2.0, 3, 0, sig_use)
except (IndexError):
log(4,"Empty data for " + combined + " Will wait 3s")
time.sleep(3)
return False
bid_tbl = bid_tbl.iloc[::-1] # flip the bid table so that the merged full_tbl is in logical order
fulltbl = bid_tbl.append(ask_tbl) # append the buy and sell side tables to create one cohesive table
minVolume = fulltbl[TBL_VOLUME].sum() * minVolumePerc # Calc minimum Volume for filtering
fulltbl = fulltbl[
(fulltbl[TBL_VOLUME] >= minVolume)] # limit our view to only orders greater than or equal to the minVolume size
fulltbl['sqrt'] = np.sqrt(fulltbl[
TBL_VOLUME]) # takes the square root of the volume (to be used later on for the purpose of sizing the order bubbles)
final_tbl = fulltbl.groupby([TBL_PRICE])[
[TBL_VOLUME]].sum() # transforms the table for a final time to craft the data view we need for analysis
final_tbl['n_unique_orders'] = fulltbl.groupby(
TBL_PRICE).address.nunique().astype(int)
final_tbl = final_tbl[(final_tbl['n_unique_orders'] <= 20.0)]
final_tbl[TBL_PRICE] = final_tbl.index
final_tbl[TBL_PRICE] = final_tbl[TBL_PRICE].apply(round_sig, args=(3, 0, sig_use))
final_tbl[TBL_VOLUME] = final_tbl[TBL_VOLUME].apply(round_sig, args=(1, 2))
final_tbl['n_unique_orders'] = final_tbl['n_unique_orders'].apply(round_sig, args=(0,))
final_tbl['sqrt'] = np.sqrt(final_tbl[TBL_VOLUME])
final_tbl['total_price'] = (((final_tbl['volume'] * final_tbl['price']).round(2)).apply(lambda x: "{:,}".format(x)))
    # The following lines prevent orders from being drawn twice when they are part of a ladder but also exceed the 1% bubble threshold
if noDouble:
bid_tbl = bid_tbl[(bid_tbl['volume'] < minVolume)]
ask_tbl = ask_tbl[(ask_tbl['volume'] < minVolume)]
bid_tbl['total_price'] = bid_tbl['volume'] * bid_tbl['price']
ask_tbl['total_price'] = ask_tbl['volume'] * ask_tbl['price']
# Get Dataset for Volume Grouping
vol_grp_bid = bid_tbl.groupby([TBL_VOLUME]).agg(
{TBL_PRICE: [np.min, np.max, 'count'], TBL_VOLUME: np.sum, 'total_price': np.sum})
vol_grp_ask = ask_tbl.groupby([TBL_VOLUME]).agg(
{TBL_PRICE: [np.min, np.max, 'count'], TBL_VOLUME: np.sum, 'total_price': np.sum})
# Rename column names for Volume Grouping
vol_grp_bid.columns = ['min_Price', 'max_Price', 'count', TBL_VOLUME, 'total_price']
vol_grp_ask.columns = ['min_Price', 'max_Price', 'count', TBL_VOLUME, 'total_price']
    # Filter by minimum volume; require at least 2 orders per group (a single order would interfere with its bubble) and fewer than 70 (mostly 1 or 0.5 ETH orders placed by humans)
vol_grp_bid = vol_grp_bid[
((vol_grp_bid[TBL_VOLUME] >= minVolume) & (vol_grp_bid['count'] >= 2.0) & (vol_grp_bid['count'] < 70.0))]
vol_grp_ask = vol_grp_ask[
((vol_grp_ask[TBL_VOLUME] >= minVolume) & (vol_grp_ask['count'] >= 2.0) & (vol_grp_ask['count'] < 70.0))]
# Get the size of each order
vol_grp_bid['unique'] = vol_grp_bid.index.get_level_values(TBL_VOLUME)
vol_grp_ask['unique'] = vol_grp_ask.index.get_level_values(TBL_VOLUME)
# Round the size of order
vol_grp_bid['unique'] = vol_grp_bid['unique'].apply(round_sig, args=(3, 0, sig_use))
vol_grp_ask['unique'] = vol_grp_ask['unique'].apply(round_sig, args=(3, 0, sig_use))
# Round the Volume
vol_grp_bid[TBL_VOLUME] = vol_grp_bid[TBL_VOLUME].apply(round_sig, args=(1, 0, sig_use))
vol_grp_ask[TBL_VOLUME] = vol_grp_ask[TBL_VOLUME].apply(round_sig, args=(1, 0, sig_use))
# Round the Min/ Max Price
vol_grp_bid['min_Price'] = vol_grp_bid['min_Price'].apply(round_sig, args=(3, 0, sig_use))
vol_grp_ask['min_Price'] = vol_grp_ask['min_Price'].apply(round_sig, args=(3, 0, sig_use))
vol_grp_bid['max_Price'] = vol_grp_bid['max_Price'].apply(round_sig, args=(3, 0, sig_use))
vol_grp_ask['max_Price'] = vol_grp_ask['max_Price'].apply(round_sig, args=(3, 0, sig_use))
# Round and format the Total Price
vol_grp_bid['total_price'] = (vol_grp_bid['total_price'].round(sig_use).apply(lambda x: "{:,}".format(x)))
vol_grp_ask['total_price'] = (vol_grp_ask['total_price'].round(sig_use).apply(lambda x: "{:,}".format(x)))
# Append individual text to each element
vol_grp_bid['text'] = ("There are " + vol_grp_bid['count'].map(str) + " orders " + vol_grp_bid['unique'].map(
str) + " " + currency +
" each, from " + symbol + vol_grp_bid['min_Price'].map(str) + " to " + symbol +
vol_grp_bid['max_Price'].map(str) + " resulting in a total of " + vol_grp_bid[
TBL_VOLUME].map(str) + " " + currency + " worth " + symbol + vol_grp_bid[
'total_price'].map(str))
vol_grp_ask['text'] = ("There are " + vol_grp_ask['count'].map(str) + " orders " + vol_grp_ask['unique'].map(
str) + " " + currency +
" each, from " + symbol + vol_grp_ask['min_Price'].map(str) + " to " + symbol +
vol_grp_ask['max_Price'].map(str) + " resulting in a total of " + vol_grp_ask[
TBL_VOLUME].map(str) + " " + currency + " worth " + symbol + vol_grp_ask[
'total_price'].map(str))
# Save data global
shape_ask[combined] = vol_grp_ask
shape_bid[combined] = vol_grp_bid
cMaxSize = final_tbl['sqrt'].max() # Fixing Bubble Size
# nifty way of ensuring the size of the bubbles is proportional and reasonable
sizeFactor = maxSize / cMaxSize
final_tbl['sqrt'] = final_tbl['sqrt'] * sizeFactor
# making the tooltip column for our charts
final_tbl['text'] = (
"There is a " + final_tbl[TBL_VOLUME].map(str) + " " + currency + " order for " + symbol + final_tbl[
TBL_PRICE].map(str) + " being offered by " + final_tbl['n_unique_orders'].map(
str) + " unique orders worth " + symbol + final_tbl['total_price'].map(str))
# determine buys / sells relative to last market price; colors price bubbles based on size
    # Buys are green, sells are red. Likely WHALES are highlighted by being brighter, detected via the unique order count.
final_tbl['colorintensity'] = final_tbl['n_unique_orders'].apply(calcColor)
final_tbl.loc[(final_tbl[TBL_PRICE] > mp), 'color'] = \
'rgb(' + final_tbl.loc[(final_tbl[TBL_PRICE] >
mp), 'colorintensity'].map(str) + ',0,0)'
final_tbl.loc[(final_tbl[TBL_PRICE] <= mp), 'color'] = \
'rgb(0,' + final_tbl.loc[(final_tbl[TBL_PRICE]
<= mp), 'colorintensity'].map(str) + ',0)'
timeStamps[combined] = timeStampsGet[combined] # now save timestamp of calc start in timestamp used for title
tables[combined] = final_tbl # save table data
marketPrice[combined] = mp # save market price
depth_ask[combined] = ob_ask
depth_bid[combined] = ob_bid
pair.newData = True
pair.prepare = True # just used for first enabling of send prepare
return True
# begin building the dash itself
app = dash.Dash(__name__)
# app.scripts.append_script({"external_url": js_extern})
# simple layout that can be improved with better CSS/JS later, but it does the job for now
# static_content_before contains all the info we want in our headers that won't be dynamic (for now)
static_content_before = [
html.H2('CRYPTO WHALE WATCHING APP'),
html.H3(html.A('GitHub Link Here (Consider supporting us by giving a star; request new features via "issues" tab)',
href="https://github.com/pmaji/eth_python_tracker")),
html.P([
"Legend: Bright colored mark = likely WHALE ",
"(high volume price point via 1 unique order, or many identical medium-sized orders in a ladder). ", html.Br(),
"Bubbles get darker as the number of unique orders increases. " , html.Br(),
"Hover over bubbles for more info. Note: volume (x-axis) on log-scale. " , html.Br(),
"Click 'Freeze all' button to halt refresh, "
"and hide/show buttons to pick which currency pairs to display. " , html.Br(),
"Only displays orders >= 1% of the volume of the portion of the order book displayed. ", html.Br(),
"If annotations overlap or bubbles cluster, click 'Freeze all' and then zoom in on the area of interest.", html.Br(),
"See GitHub link above for further details.", html.Br()]),
# Create Div to place a conditionally visible loading animation.
html.Div(id="loader", style= {'display': 'block'}, children=[html.Div(className="loader"), html.Div('Hunting Whales...', className='loader-text')]# <-- This is the line that will be changed by the dropdown callback
)
]
cCache = []
for pair in PAIRS:
ticker = pair.ticker
exchange = pair.exchange
graph = 'live-graph-' + exchange + "-" + ticker
cCache.append(html.Br())
cCache.append(html.Div(id=graph))
static_content_after = dcc.Interval(
id='main-interval-component',
interval=clientRefresh * 1000
)
app.layout = html.Div(id='main_container', children=[
html.Div(static_content_before),
html.Div(id='graphs_Container', children=cCache),
html.Div(static_content_after),
])
def prepare_data(pair):
    # take the Pair object directly so newData is reset on the correct pair
    ticker = pair.ticker
    exchange = pair.exchange
    combined = exchange + ticker
    data = get_data_cache(combined)
    pair.newData = False
base_currency = ticker.split("-")[1]
ob_ask = depth_ask[combined]
ob_bid = depth_bid[combined]
#Get Minimum and Maximum
ladder_Bid_Min = fixNan(shape_bid[combined]['volume'].min())
ladder_Bid_Max = fixNan(shape_bid[combined]['volume'].max(), False)
ladder_Ask_Min = fixNan(shape_ask[combined]['volume'].min())
ladder_Ask_Max = fixNan(shape_ask[combined]['volume'].max(), False)
data_min = fixNan(data[TBL_VOLUME].min())
data_max = fixNan(data[TBL_VOLUME].max(), False)
ob_bid_max = fixNan(ob_bid[TBL_VOLUME].max(), False)
ob_ask_max = fixNan(ob_ask[TBL_VOLUME].max(), False)
symbol = SYMBOLS.get(base_currency.upper(), "")
x_min = min([ladder_Bid_Min, ladder_Ask_Min, data_min])
x_max = max([ladder_Bid_Max, ladder_Ask_Max, data_max, ob_ask_max, ob_bid_max])
max_unique = max([fixNan(shape_bid[combined]['unique'].max(), False),
fixNan(shape_ask[combined]['unique'].max(), False)])
width_factor = 15
if max_unique > 0: width_factor = 15 / max_unique
market_price = marketPrice[combined]
bid_trace = go.Scatter(
x=[], y=[],
text=[],
mode='markers', hoverinfo='text',
marker=dict(opacity=0, color='rgb(0,255,0)'))
ask_trace = go.Scatter(
x=[], y=[],
text=[],
mode='markers', hoverinfo='text',
marker=dict(opacity=0, color='rgb(255,0,0)'))
shape_arr = [dict(
# Line Horizontal
type='line',
x0=x_min * 0.5, y0=market_price,
x1=x_max * 1.5, y1=market_price,
line=dict(color='rgb(0, 0, 0)', width=2, dash='dash')
)]
annot_arr = [dict(
x=log10((x_max*0.9)), y=market_price, xref='x', yref='y',
text=str(market_price) + symbol,
showarrow=True, arrowhead=7, ax=20, ay=0,
bgcolor='rgb(0,0,255)', font={'color': '#ffffff'}
)]
# delete these 10 lines below if we want to move to a JS-based coloring system in the future
shape_arr.append(dict(type='rect',
x0=x_min, y0=market_price,
x1=x_max, y1=market_price * 1.05,
line=dict(color='rgb(255, 0, 0)', width=0.01),
fillcolor='rgba(255, 0, 0, 0.04)'))
shape_arr.append(dict(type='rect',
x0=x_min, y0=market_price,
x1=x_max, y1=market_price * 0.95,
line=dict(color='rgb(0, 255, 0)', width=0.01),
fillcolor='rgba(0, 255, 0, 0.04)'))
for index, row in shape_bid[combined].iterrows():
cWidth = row['unique'] * width_factor
vol = row[TBL_VOLUME]
posY = (row['min_Price'] + row['max_Price']) / 2.0
if cWidth > 15:
cWidth = 15
elif cWidth < 2:
cWidth = 2
shape_arr.append(dict(type='line',
opacity=0.5,
x0=vol, y0=row['min_Price'],
x1=vol, y1=row['max_Price'],
line=dict(color='rgb(0, 255, 0)', width=cWidth)))
        bid_trace['x'].append(vol)
        bid_trace['y'].append(row['min_Price'])
        bid_trace['text'].append(row['text'])
        bid_trace['x'].append(vol)
        bid_trace['y'].append(posY)
        bid_trace['text'].append(row['text'])
        bid_trace['x'].append(vol)
        bid_trace['y'].append(row['max_Price'])
        bid_trace['text'].append(row['text'])
for index, row in shape_ask[combined].iterrows():
cWidth = row['unique'] * width_factor
vol = row[TBL_VOLUME]
posY = (row['min_Price'] + row['max_Price']) / 2.0
if cWidth > 15:
cWidth = 15
elif cWidth < 2:
cWidth = 2
shape_arr.append(dict(type='line',
opacity=0.5,
x0=vol, y0=row['min_Price'],
x1=vol, y1=row['max_Price'],
line=dict(color='rgb(255, 0, 0)', width=cWidth)))
ask_trace['x'].append(vol)
ask_trace['y'].append(row['min_Price'])
ask_trace['text'].append(row['text'])
ask_trace['x'].append(vol)
ask_trace['y'].append(posY)
ask_trace['text'].append(row['text'])
ask_trace['x'].append(vol)
ask_trace['y'].append(row['max_Price'])
ask_trace['text'].append(row['text'])
result = {
'data': [
go.Scatter(
x=data[TBL_VOLUME],
y=data[TBL_PRICE],
mode='markers',
text=data['text'],
opacity=0.95,
hoverinfo='text',
marker={
'size': data['sqrt'],
'line': {'width': 0.5, 'color': 'white'},
'color': data['color']
},
), ask_trace, bid_trace, go.Scatter(
x=ob_ask[TBL_VOLUME],
y=ob_ask[TBL_PRICE],
mode='lines',
opacity=0.5,
hoverinfo='text',
text=ob_ask['text'],
line = dict(color = ('rgb(255, 0, 0)'),
width = 2)
),go.Scatter(
x=ob_bid[TBL_VOLUME],
y=ob_bid[TBL_PRICE],
mode='lines',
opacity=0.5,
hoverinfo='text',
text=ob_bid['text'],
line = dict(color = ('rgb(0, 255, 0)'),
width = 2)
)
],
'layout': go.Layout(
# title automatically updates with refreshed market price
title=("The present market price of {} on {} is: {}{} at {}".format(ticker, exchange, symbol,
str(
marketPrice[combined]),
timeStamps[combined])),
xaxis=dict(title='Order Size', type='log', autotick=True,range=[log10(x_min*0.95), log10(x_max*1.03)]),
yaxis={'title': '{} Price'.format(ticker),'range':[market_price*0.94, market_price*1.06]},
hovermode='closest',
# now code to ensure the sizing is right
margin=go.Margin(
l=75, r=75,
b=50, t=50,
pad=4),
paper_bgcolor='rgba(0,0,0,0)', # set bg to be transparent, works with themes.
plot_bgcolor='rgba(0,0,0,0)', # set bg to be transparent, works with themes.
# adding the horizontal reference line at market price
shapes=shape_arr,
annotations=annot_arr,
showlegend=False
)
}
return result
def prepare_send():
lCache = []
cData = get_All_data()
for pair in PAIRS:
ticker = pair.ticker
exchange = pair.exchange
graph = 'live-graph-' + exchange + "-" + ticker
lCache.append(html.Br())
if (pair.Dataprepared):
lCache.append(dcc.Graph(
className='plot',
id=graph,
figure=cData[exchange + ticker]
))
else:
lCache.append(html.Div(className='plot', id=graph))
return lCache
# links up the chart creation to the interval for an auto-refresh
# creates one callback per currency pairing; easy to replicate / add new pairs
@app.callback(Output('graphs_Container', 'children'),
events=[Event('main-interval-component', 'interval')])
def update_Site_data():
return getSendCache()
# round x to `sig` significant figures; overwrite > 0 instead rounds to that fixed number of decimals, and minimum sets the least number of decimal places used for large values
def round_sig(x, sig=3, overwrite=0, minimum=0):
if (x == 0):
return 0.0
elif overwrite > 0:
return round(x, overwrite)
else:
digits = -int(floor(log10(abs(x)))) + (sig - 1)
if digits <= minimum:
return round(x, minimum)
else:
return round(x, digits)
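# For example: round_sig(0.0123456) -> 0.0123 (three significant figures),
# round_sig(1234.5678, overwrite=2) -> 1234.57 (fixed two decimals), and
# round_sig(1234.5678, 3, 0, 2) -> 1234.57 (at least two decimal places kept for large numbers).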
# map the number of unique orders to a color intensity: fewer orders give a brighter color, clamped to the range [30, 255]
def calcColor(x):
response = round(400 / x)
if response > 255:
response = 255
elif response < 30:
response = 30
return response
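# For example: calcColor(1) -> 255 (brightest, a single whale-sized order),
# calcColor(4) -> 100, and calcColor(20) -> 30 (dimmest, clamped at 30).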
def fixNan(x, pMin=True):
if isnan(x):
if pMin:
return 99999
else:
return 0
else:
return x
def getStamp():
return int(round(time.time() * 1000))
# watchdog: starts the server and worker threads, then restarts any that die (e.g. when a pair's refresh stops)
def watchdog():
global PAIRS
tServer = threading.Thread(target=serverThread)
tServer.daemon = False
tServer.start()
time.sleep(3) # get Server start
log(2,"Server should be running now")
tPreparer = threading.Thread(target=sendPrepareThread)
tPreparer.daemon = False
tPreparer.start()
for pair in PAIRS:
pair.threadWebsocket = threading.Thread(
target=websockThread, args=(pair,))
pair.threadWebsocket.daemon = False
pair.threadWebsocket.start()
time.sleep(3)
log(2,"Web sockets up")
for pair in PAIRS:
pair.threadRecalc = threading.Thread(target=recalcThread, args=(pair,))
pair.threadRecalc.daemon = False
pair.threadRecalc.start()
time.sleep(2.5)
log(2,"ReCalc up")
for pair in PAIRS:
pair.threadPrepare = threading.Thread(
target=preparePairThread, args=(pair,))
pair.threadPrepare.daemon = False
pair.threadPrepare.start()
log(2,"Everything should be running now, starting Watchdog, to control the herd")
while True:
time.sleep(2)
alive = True
for pair in PAIRS:
if not pair.threadRecalc.isAlive():
alive = False
log(2,"Restarting pair Recalc " +
pair.exchange + " " + pair.ticker)
pair.threadRecalc = threading.Thread(
target=recalcThread, args=(pair,))
pair.threadRecalc.daemon = False
pair.threadRecalc.start()
if not pair.threadWebsocket.isAlive():
alive = False
log(2,"Restarting pair Web socket " +
pair.exchange + " " + pair.ticker)
pair.webSocketKill = 1
pair.threadWebsocket = threading.Thread(
target=websockThread, args=(pair,))
pair.threadWebsocket.daemon = False
pair.threadWebsocket.start()
if not pair.threadPrepare.isAlive():
alive = False
log(2,"Restarting pair Prepare worker " +
pair.exchange + " " + pair.ticker)
pair.threadPrepare = threading.Thread(
target=preparePairThread, args=(pair,))
pair.threadPrepare.daemon = False
pair.threadPrepare.start()
if not tServer.isAlive():
alive = False
log(3,"Watchdog detected dead Server, restarting")
tServer = threading.Thread(target=serverThread)
tServer.daemon = False
tServer.start()
if not tPreparer.isAlive():
alive = False
log(3,"Watchdog detected dead Preparer, restarting")
tPreparer = threading.Thread(target=sendPrepareThread)
tPreparer.daemon = False
tPreparer.start()
if not alive:
log(3,"Watchdog got some bad sheeps back to group")
def serverThread():
app.run_server(host='0.0.0.0', port=serverPort)
def sendPrepareThread():
global sendCache, first_prepare, overallNewData
while True:
sendCache = prepare_send()
overallNewData = False
time.sleep(0.5)
while not overallNewData:
time.sleep(0.5)
def recalcThread(pair):
count = 0
refreshes = 0
while True:
if (pair.websocket):
dif = getStamp() - pair.lastStamp
if dif > desiredPairRefresh:
log(1,"Ms Diff for " + pair.ticker + " is " + str(
dif) + " Total refreshes for pair " + str(refreshes))
refreshes += 1
if not calc_data(pair):
count = count + 1
else:
count = 0
pair.lastStamp = pair.usedStamp
if count > 5:
log(3,"Going to kill Web socket from " + pair.ticker)
count = -5
pair.webSocketKill = 0
else:
time.sleep((desiredPairRefresh - dif) / 1000)
def websockThread(pair):
pair.websocket = False
pair.ob_Inst = GDaxBook(pair.ticker)
time.sleep(5)
pair.websocket = True
    while True:
        # dividing by webSocketKill deliberately raises ZeroDivisionError once it is set to 0,
        # so this thread dies and the watchdog restarts the web socket
        kill = 5 / pair.webSocketKill
        time.sleep(4)
def preparePairThread(pair):
global prepared, overallNewData
ticker = pair.ticker
exc = pair.exchange
cbn = exc + ticker
while True:
if (pair.prepare):
            prepared[cbn] = prepare_data(pair)
overallNewData = True
pair.Dataprepared = True
while not pair.newData:
time.sleep(0.2)
def handleArgs(argv):
global serverPort, debugLevel, desiredPairRefresh
try:
opts, args = getopt.getopt(
argv, "hp:d:", ["port=","debug=","pRefresh="])
except getopt.GetoptError:
print('app.py -h')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('app.py --port 8050 --pRefresh')
print('--pRefresh indicates the refresh Rate in ms')
sys.exit()
elif opt in ("-p", "--port"):
serverPort = int(arg)
elif opt in ("-d", "--debug"):
debugLevel = int(arg)
elif opt in ("--pRefresh"):
desiredPairRefresh = int(arg)
log(4,"Legend: This is an error message")
log(3,"Legend: This is a warning message")
log(2,"Legend: This is an info message")
log(1,"Legend: This is a debug message")
log(0,"Legend: This is a deep debug message")
log(1,'Web Interface Port is ' + str(serverPort))
log(1,'Debug Level is ' + str(debugLevel))
if __name__ == '__main__':
# Initial Load of Data
handleArgs(sys.argv[1:])
watchdog()
|
#!/usr/bin/env python3
from string import ascii_lowercase
medical = open('medical.txt', 'r').read().splitlines()
positive = open('positive.txt', 'r').read().splitlines()
print('Missing the following initials:')
initials = list(map(lambda s: s[0], medical))
for c in ascii_lowercase:
if c not in initials:
print(c, end='')
print('\n')
for i, txtfile in enumerate([medical, positive]):
print('The following are repeated in text file ' + str(i) + ':')
for w in txtfile:
        if txtfile.count(w) > 1:
print(w)
|
from dataclasses import dataclass
__NAMESPACE__ = "http://www.opengis.net/fes/2.0"
@dataclass
class LogicalOperators:
class Meta:
namespace = "http://www.opengis.net/fes/2.0"
|
import hashlib
import os
import tempfile
from attic.hashindex import NSIndex, ChunkIndex
from attic.testsuite import AtticTestCase
class HashIndexTestCase(AtticTestCase):
def _generic_test(self, cls, make_value, sha):
idx = cls()
self.assert_equal(len(idx), 0)
# Test set
for x in range(100):
idx[bytes('%-32d' % x, 'ascii')] = make_value(x)
self.assert_equal(len(idx), 100)
for x in range(100):
self.assert_equal(idx[bytes('%-32d' % x, 'ascii')], make_value(x))
# Test update
for x in range(100):
idx[bytes('%-32d' % x, 'ascii')] = make_value(x * 2)
self.assert_equal(len(idx), 100)
for x in range(100):
self.assert_equal(idx[bytes('%-32d' % x, 'ascii')], make_value(x * 2))
# Test delete
for x in range(50):
del idx[bytes('%-32d' % x, 'ascii')]
self.assert_equal(len(idx), 50)
idx_name = tempfile.NamedTemporaryFile()
idx.write(idx_name.name)
del idx
# Verify file contents
with open(idx_name.name, 'rb') as fd:
self.assert_equal(hashlib.sha256(fd.read()).hexdigest(), sha)
# Make sure we can open the file
idx = cls.read(idx_name.name)
self.assert_equal(len(idx), 50)
for x in range(50, 100):
self.assert_equal(idx[bytes('%-32d' % x, 'ascii')], make_value(x * 2))
idx.clear()
self.assert_equal(len(idx), 0)
idx.write(idx_name.name)
del idx
self.assert_equal(len(cls.read(idx_name.name)), 0)
def test_nsindex(self):
self._generic_test(
NSIndex, lambda x: (x, x),
'369a18ae6a52524eb2884a3c0fdc2824947edd017a2688c5d4d7b3510c245ab9')
def test_chunkindex(self):
self._generic_test(
ChunkIndex, lambda x: (x, x, x),
'ed22e8a883400453c0ee79a06c54df72c994a54eeefdc6c0989efdc5ee6d07b7')
def test_resize(self):
n = 2000 # Must be >= MIN_BUCKETS
idx_name = tempfile.NamedTemporaryFile()
idx = NSIndex()
idx.write(idx_name.name)
initial_size = os.path.getsize(idx_name.name)
self.assert_equal(len(idx), 0)
for x in range(n):
idx[bytes('%-32d' % x, 'ascii')] = x, x
idx.write(idx_name.name)
self.assert_true(initial_size < os.path.getsize(idx_name.name))
for x in range(n):
del idx[bytes('%-32d' % x, 'ascii')]
self.assert_equal(len(idx), 0)
idx.write(idx_name.name)
self.assert_equal(initial_size, os.path.getsize(idx_name.name))
def test_iteritems(self):
idx = NSIndex()
for x in range(100):
idx[bytes('%-0.32d' % x, 'ascii')] = x, x
all = list(idx.iteritems())
self.assert_equal(len(all), 100)
second_half = list(idx.iteritems(marker=all[49][0]))
self.assert_equal(len(second_half), 50)
self.assert_equal(second_half, all[50:])
|
from ansible.utils import parse_kv, template
class ActionModule(object):
"""
TODO: FIXME when upgrading to Ansible 2.x
"""
TRANSFERS_FILES = False
def __init__(self, runner):
self.runner = runner
self.basedir = runner.basedir
def _arg_or_fact(self, arg_name, fact_name, args, inject):
res = args.get(arg_name)
if res is not None:
return res
template_string = '{{ %s }}' % fact_name
res = template.template(self.basedir, template_string, inject)
return None if res == template_string else res
def _merge_args(self, module_args, complex_args):
args = {}
if complex_args:
args.update(complex_args)
kv = parse_kv(module_args)
args.update(kv)
return args
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
args = self._merge_args(module_args, complex_args)
check_handler = self._arg_or_fact('handler', 'monitoring.check_handler', args, inject)
        args['handler'] = check_handler  # store the resolved handler in the merged args passed on below
module_return = self.runner._execute_module(conn, tmp, 'sensu_check', module_args, inject=inject,
complex_args=args)
return module_return
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import popart.ir as pir
import popart.ir.ops as ops
import popart._internal.ir as _ir
from popart.ir import dtypes
from utils import contains_op_of_type
class TestScatter:
def test_fn(self):
ir = pir.Ir()
g = ir.main_graph()
with g:
t = pir.variable([
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
])
indices = pir.variable([
[1, 0, 2],
[0, 2, 1],
],
dtype=dtypes.int32)
values = pir.variable([
[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0],
])
c = ops.scatter(t, indices, values)
assert len(g.get_tensors()) == 4
assert len(g.get_variables()) == 3
assert contains_op_of_type("Scatter", _ir.op.ScatterOp, g)
|
from lamby.src.config import config
from lamby.src.init import init
def test_init(runner):
with runner.isolated_filesystem():
lamby_dir = './.lamby'
runner.invoke(init)
key = "key"
value = "value"
change_value = "changed value"
compare_line = "{\"key\": \"value\"}"
change_line = "{\"key\": \"changed value\"}"
# test add #
result = runner.invoke(config, ['--add', key, value])
assert result.exit_code == 0
with open(lamby_dir + '/config', "r") as file:
for line in file:
assert(line == compare_line)
# test change #
result = runner.invoke(config, ['--change', key, change_value])
assert result.exit_code == 0
with open(lamby_dir + '/config', "r") as file:
for line in file:
assert(line == change_line)
# test remove #
        result = runner.invoke(config, ['--remove', key])
        assert result.exit_code == 0
        with open(lamby_dir + '/config', "r") as file:
            assert(file.read() == "{}")
|
import json
import os
import shutil
import subprocess
import requests
from string import Template
from osm_export_tool.sql import to_prefix
import shapely.geometry
# path must return a path to an .osm.pbf or .osm.xml on the filesystem
class Pbf:
def __init__(self,path):
self._path = path
def fetch(self):
pass
def path(self):
return self._path
class OsmExpress:
def __init__(self,osmx_path,db_path,geom,output_path,use_existing=True,tempdir=None):
self.osmx_path = osmx_path
self.db_path = db_path
self.geom = geom
self.output_path = output_path
self.use_existing = use_existing
self.tempdir = tempdir
def fetch(self):
region_json = os.path.join(self.tempdir,'region.json')
with open(region_json,'w') as f:
f.write(json.dumps(shapely.geometry.mapping(self.geom)))
subprocess.check_call([self.osmx_path,'extract',self.db_path,self.output_path,'--region',region_json])
os.remove(region_json)
def path(self):
if os.path.isfile(self.output_path) and self.use_existing:
return self.output_path
else:
self.fetch()
return self.output_path
class OsmiumTool:
def __init__(self,osmium_path,source_path,geom,output_path,use_existing=True,tempdir=None):
self.osmium_path = osmium_path
self.source_path = source_path
self.geom = geom
self.output_path = output_path
self.use_existing = use_existing
self.tempdir = tempdir
def fetch(self):
region_json = os.path.join(self.tempdir,'region.json')
with open(region_json,'w') as f:
f.write(json.dumps({'type':'Feature','geometry':shapely.geometry.mapping(self.geom)}))
subprocess.check_call([self.osmium_path,'extract','-p',region_json,self.source_path,'-o',self.output_path,'--overwrite'])
os.remove(region_json)
def path(self):
if os.path.isfile(self.output_path) and self.use_existing:
return self.output_path
else:
self.fetch()
return self.output_path
class Overpass:
@classmethod
def filters(cls,mapping):
nodes = set()
ways = set()
relations = set()
for t in mapping.themes:
parts = cls.parts(t.matcher.expr)
if t.points:
for part in parts:
nodes.add(part)
if t.lines:
for part in parts:
ways.add(part)
if t.polygons:
for part in parts:
ways.add(part)
relations.add(part)
return nodes,ways,relations
# force quoting of strings to handle keys with colons
@classmethod
def parts(cls, expr):
def _parts(prefix):
op = prefix[0]
if op == '=':
return ["['{0}'='{1}']".format(prefix[1],prefix[2])]
if op == '!=':
return ["['{0}'!='{1}']".format(prefix[1],prefix[2])]
if op in ['<','>','<=','>='] or op == 'notnull':
return ["['{0}']".format(prefix[1])]
if op == 'in':
x = "['{0}'~'{1}']".format(prefix[1],'|'.join(prefix[2]))
return [x]
if op == 'and' or op == 'or':
return _parts(prefix[1]) + _parts(prefix[2])
return _parts(expr)
@classmethod
def sql(cls,str):
return cls.parts(to_prefix(str))
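    # Illustrative sketch, assuming osm_export_tool.sql.to_prefix("natural = 'water'")
    # yields the prefix tuple ('=', 'natural', 'water'): Overpass.sql("natural = 'water'")
    # would then return ["['natural'='water']"], i.e. a quoted Overpass QL tag filter.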
def __init__(self,hostname,geom,path,use_existing=True,tempdir=None,osmconvert_path='osmconvert',mapping=None,use_curl=False):
self.hostname = hostname
self._path = path
self.geom = geom
self.use_existing = use_existing
self.osmconvert_path = osmconvert_path
self.tmp_path = os.path.join(tempdir,'tmp.osm.xml')
self.mapping = mapping
self.use_curl = use_curl
self.tempdir = tempdir
def fetch(self):
base_template = Template('[maxsize:$maxsize][timeout:$timeout];$query;out meta;')
if self.geom.geom_type == 'Polygon':
geom = 'poly:"{0}"'.format(' '.join(['{1} {0}'.format(*x) for x in self.geom.exterior.coords]))
else:
bounds = self.geom.bounds
west = max(bounds[0], -180)
south = max(bounds[1], -90)
east = min(bounds[2], 180)
north = min(bounds[3], 90)
geom = '{1},{0},{3},{2}'.format(west, south, east, north)
if self.mapping:
query = """(
(
{0}
);
(
{1}
);>;
(
{2}
);>>;>;)"""
nodes,ways,relations = Overpass.filters(self.mapping)
nodes = '\n'.join(['node({0}){1};'.format(geom,f) for f in nodes])
ways = '\n'.join(['way({0}){1};'.format(geom,f) for f in ways])
relations = '\n'.join(['relation({0}){1};'.format(geom,f) for f in relations])
query = query.format(nodes,ways,relations)
else:
query = '(node({0});<;>>;>;)'.format(geom)
data = base_template.substitute(maxsize=2147483648,timeout=1600,query=query)
if self.use_curl:
with open(os.path.join(self.tempdir,'query.txt'),'w') as query_txt:
query_txt.write(data)
print(['curl','-X','POST','-d','@'+os.path.join(self.tempdir,'query.txt'),os.path.join(self.hostname,'api','interpreter'),'-o',self.tmp_path])
subprocess.check_call(['curl','-X','POST','-d','@'+os.path.join(self.tempdir,'query.txt'),os.path.join(self.hostname,'api','interpreter'),'-o',self.tmp_path])
else:
with requests.post(os.path.join(self.hostname,'api','interpreter'),data=data, stream=True) as r:
with open(self.tmp_path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
with open(self.tmp_path,'r') as f:
sample = [next(f) for x in range(6)]
if 'DOCTYPE html' in sample[1]:
raise Exception('Overpass failure')
if 'remark' in sample[5]:
raise Exception(sample[5])
# run osmconvert on the file
subprocess.check_call([self.osmconvert_path,self.tmp_path,'--out-pbf','-o='+self._path])
os.remove(self.tmp_path)
def path(self):
if os.path.isfile(self._path) and self.use_existing:
return self._path
else:
self.fetch()
return self._path
|
#
# Copyright 2014 Shahriyar Amini
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'shahriyar'
__copyright__ = 'Copyright 2014, Shahriyar Amini'
import math
class Grid(object):
INVALID = -1
# hStep and vStep are just numbers but for our implementation we turn them into points
def __init__(self, nw=None, se=None, hStep=-1, vStep=-1):
self.__nw = nw
self.__se = se
self.__hStep = Point(hStep, 0)
self.__vStep = Point(0, vStep)
self.__cells = None
self.__numCols = -1
self.__numRows = -1
self.__numCells = -1
def __repr__(self):
if self.__cells is None or len(self.__cells) <= 0:
return 'Grid is empty.'
strings = []
for cell in self.__cells:
strings.append(str(cell))
return '\n'.join(strings)
def populate(self):
if self.__nw is None or self.__se is None:
return
        if self.__hStep is None or self.__hStep.X() <= 0:
            return
        if self.__vStep is None or self.__vStep.Y() <= 0:
            return
if self.__nw.distance(self.__se) <= 0:
return
width = self.width()
height = self.height()
if width is None or height is None:
return
self.__numRows = int(math.ceil(float(height) / self.__vStep.Y()))
#print self.__numRows
self.__numCols = int(math.ceil(float(width) / self.__hStep.X()))
#print self.__numCols
self.__numCells = int(self.__numRows * self.__numCols)
#print self.__numCells
self.__cells = []
for i in range(0, self.__numCells):
self.__cells.append(None)
# create the first cell
initCellNW = self.__nw
initCellNE = initCellNW + self.__hStep
initCellSE = initCellNE + self.__vStep
initCellSW = self.__nw + self.__vStep
initVertices = [initCellNW, initCellNE, initCellSE, initCellSW]
self.correctVertices(initVertices)
initCell = Cell(0, initVertices)
self.__cells[0] = initCell
# create the first row
for i in range(1, self.__numCols):
prevCell = self.__cells[i - 1]
prevVertices = prevCell.getVertices()
vertices = [None, None, None, None]
vertices[Cell.INDEX_NW] = prevVertices[Cell.INDEX_NE]
vertices[Cell.INDEX_SW] = prevVertices[Cell.INDEX_SE]
vertices[Cell.INDEX_NE] = vertices[Cell.INDEX_NW] + self.__hStep
vertices[Cell.INDEX_SE] = vertices[Cell.INDEX_SW] + self.__hStep
self.correctVertices(vertices)
self.__cells[i] = Cell(i, vertices)
for i in range(1, self.__numRows):
for j in range(0, self.__numCols):
_id = self.idByIndices(i, j)
northernId = self.idByIndices(i - 1, j)
northernCell = self.__cells[northernId]
northernCellVertices = northernCell.getVertices()
vertices = [None, None, None, None]
vertices[Cell.INDEX_NW] = northernCellVertices[Cell.INDEX_SW]
vertices[Cell.INDEX_NE] = northernCellVertices[Cell.INDEX_SE]
vertices[Cell.INDEX_SE] = vertices[Cell.INDEX_NE] + self.__vStep
vertices[Cell.INDEX_SW] = vertices[Cell.INDEX_NW] + self.__vStep
self.correctVertices(vertices)
self.__cells[_id] = Cell(_id, vertices)
def cellById(self, _id):
if _id is None or _id < 0:
return None
return self.__cells[_id]
def cellByIndex(self, row, col):
_id = self.idByIndices(row, col)
return self.__cells[_id]
def cells(self):
return self.__cells
def cellCenters(self):
return map(lambda x:x.center(), self.__cells)
def cellSpecificPoints(self, index):
return map(lambda x:x.getVertices()[index], self.__cells)
def correctVertices(self, vertices):
if vertices is None:
return None
for vertex in vertices:
if vertex.X() < self.__nw.X():
vertex.setX(self.__nw.X())
if vertex.X() > self.__se.X():
vertex.setX(self.__se.X())
if vertex.Y() < self.__nw.Y():
vertex.setY(self.__nw.Y())
if vertex.Y() > self.__se.Y():
vertex.setY(self.__se.Y())
def idByIndices(self, row, col):
if row is None or col is None:
return None
if row < 0:
return None
if col < 0:
return None
return row * self.__numCols + col
def height(self):
try:
return self.__se.Y() - self.__nw.Y()
except:
return None
def width(self):
try:
return self.__se.X() - self.__nw.X()
except:
return None
def printNWPoints(self):
        if self.__cells is None or len(self.__cells) <= 0:
            print 'Grid is empty.'
            return
strings = []
for nwPoint in self.cellSpecificPoints(Cell.INDEX_NW):
strings.append('%d, %d' % (nwPoint.X(), nwPoint.Y()))
print '\n'.join(strings)
def printCenterPoints(self):
        if self.__cells is None or len(self.__cells) <= 0:
            print 'Grid is empty.'
            return
strings = []
for centerPoint in self.cellCenters():
strings.append('%d, %d' % (centerPoint.X(), centerPoint.Y()))
print '\n'.join(strings)
class Cell(object):
INVALID_ID = -1
INDEX_NW = 0
INDEX_NE = 1
INDEX_SE = 2
INDEX_SW = 3
def __init__(self, _id=-1, vertices=None):
self.__id = _id
# index 0 used for initial vertex, index clockwise
# should be 4 points, we assume cells are rectangular
self.__vertices = vertices
def __repr__(self):
string = 'vertices:'
if self.__vertices is None:
string += '\tNone'
else:
for vertex in self.__vertices:
string += '\t' + str(vertex)
return 'id:\t%d\t%s' % (self.getId(), string)
def getId(self):
return self.__id
def setId(self, _id):
self.__id = _id
def getVertices(self):
return self.__vertices
def setVertices(self, vertices):
self.__vertices = vertices
def center(self):
if self.__vertices is None:
return None
sumPoint = Point()
for vertex in self.__vertices:
sumPoint += vertex
return Point(sumPoint.X() / 4.0, sumPoint.Y() / 4.0)
def width(self):
if self.__vertices is None:
return None
        return self.__vertices[Cell.INDEX_NW].distance(self.__vertices[Cell.INDEX_NE])
def height(self):
if self.__vertices is None:
return None
        return self.__vertices[Cell.INDEX_NW].distance(self.__vertices[Cell.INDEX_SW])
class Point(object):
def __init__(self, x=0, y=0):
self.reset(x, y)
def __add__(self, p):
return Point(self.X() + p.X(), self.Y() + p.Y())
def __repr__(self):
return '(%d, %d)' % (self.X(), self.Y())
def distance(self, other):
dx = self.X() - other.X()
dy = self.Y() - other.Y()
return math.hypot(dx, dy)
def reset(self, x=0, y=0):
self.setX(x)
self.setY(y)
def setX(self, x):
self.__x = x
def setY(self, y):
self.__y = y
def X(self):
return self.__x
def Y(self):
return self.__y
if __name__ == "__main__":
nw = Point(0, 20)
se = Point(720 - 1, 1280 -1)
hStep = 50
vStep = 50
g = Grid(nw, se, hStep, vStep)
g.populate()
#print g
g.printNWPoints()
#g.printCenterPoints()
# get the center points and order them by x,y coordinates
centerPoints = g.cellCenters()
s = sorted(centerPoints, key = lambda p: (p.X(), p.Y()))
print '\n', s
# sort s again, this time by y, x coordinates
s = sorted(s, key = lambda p: (p.Y(), p.X()))
print '\n', s
|
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
rete = buildNetwork(2, 8, 1)
ds = SupervisedDataSet(2, 1)
f = 1
ds.addSample((0, 0), (0,))
ds.addSample((0, 1), (0,))
ds.addSample((1, 1), (1,))
ds.addSample((2, 2), (4,))
ds.addSample((2, 1), (2,))
ds.addSample((2, 5), (10,))
ds.addSample((5, 5), (25,))
trainer = BackpropTrainer(rete, ds)
while f != 0:
trainer.trainUntilConvergence()
    x = float(input('X: '))
    y = float(input('Y: '))
    z = rete.activate([x, y])
    print(z)
    f = int(input('Correct? 1>yes 2>no 0>exit '))
if f == 1:
ds.addSample((x, y), (z,))
else:
        print('Ok, I got it wrong')
print('Goodbye!')
|
import json
# with open("./dataList.json") as f:
# print(json.loads(f.read()))
result = json.loads('{"username":"david.wei","token":"QFIdEKqb71ZLeJyXT0vh3uAVZLQvVRd9"}')
print(json.dumps(result))  # serialize the dict back to a JSON string
print(type(result), result["username"])
|
#!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
"""
scales.py: Custom scales used by taurus.qt.qtgui.plot module
"""
from __future__ import print_function
import numpy
from datetime import datetime, timedelta
from time import mktime
from taurus.external.qt import Qt, Qwt5
__all__ = ["DateTimeScaleEngine", "DeltaTimeScaleEngine", "FixedLabelsScaleEngine",
"FancyScaleDraw", "TaurusTimeScaleDraw", "DeltaTimeScaleDraw",
"FixedLabelsScaleDraw"]
def _getDefaultAxisLabelsAlignment(axis, rotation):
'''return a "smart" alignment for the axis labels depending on the axis
and the label rotation
:param axis: (Qwt5.QwtPlot.Axis) the axis
:param rotation: (float) The rotation (in degrees, clockwise-positive)
:return: (Qt.Alignment) an alignment
'''
if axis == Qwt5.QwtPlot.xBottom:
if rotation == 0:
return Qt.Qt.AlignHCenter | Qt.Qt.AlignBottom
elif rotation < 0:
return Qt.Qt.AlignLeft | Qt.Qt.AlignBottom
else:
return Qt.Qt.AlignRight | Qt.Qt.AlignBottom
elif axis == Qwt5.QwtPlot.yLeft:
if rotation == 0:
return Qt.Qt.AlignLeft | Qt.Qt.AlignVCenter
elif rotation < 0:
return Qt.Qt.AlignLeft | Qt.Qt.AlignBottom
else:
return Qt.Qt.AlignLeft | Qt.Qt.AlignTop
elif axis == Qwt5.QwtPlot.yRight:
if rotation == 0:
return Qt.Qt.AlignRight | Qt.Qt.AlignVCenter
elif rotation < 0:
return Qt.Qt.AlignRight | Qt.Qt.AlignTop
else:
return Qt.Qt.AlignRight | Qt.Qt.AlignBottom
elif axis == Qwt5.QwtPlot.xTop:
if rotation == 0:
return Qt.Qt.AlignHCenter | Qt.Qt.AlignTop
elif rotation < 0:
return Qt.Qt.AlignLeft | Qt.Qt.AlignTop
else:
return Qt.Qt.AlignRight | Qt.Qt.AlignTop
class FancyScaleDraw(Qwt5.QwtScaleDraw):
'''This is a scaleDraw with a tuneable palette and label formats'''
def __init__(self, format=None, palette=None):
Qwt5.QwtScaleDraw.__init__(self)
self._labelFormat = format
self._palette = palette
def setPalette(self, palette):
'''pass a QPalette or None to use default'''
self._palette = palette
def getPalette(self):
return self._palette
def setLabelFormat(self, format):
'''pass a format string (e.g. "%g") or None to use default (it uses the locale)'''
self._labelFormat = format
self.invalidateCache() # to force repainting of the labels
def getLabelFormat(self):
'''pass a format string (e.g. "%g") or None to use default (it uses the locale)'''
return self._labelFormat
def label(self, val):
if str(self._labelFormat) == "":
return Qwt5.QwtText()
if self._labelFormat is None:
return Qwt5.QwtScaleDraw.label(self, val)
else:
return Qwt5.QwtText(self._labelFormat % val)
def draw(self, painter, palette):
if self._palette is None:
Qwt5.QwtScaleDraw.draw(self, painter, palette)
else:
Qwt5.QwtScaleDraw.draw(self, painter, self._palette)
class DateTimeScaleEngine(Qwt5.QwtLinearScaleEngine):
def __init__(self, scaleDraw=None):
Qwt5.QwtLinearScaleEngine.__init__(self)
self.setScaleDraw(scaleDraw)
def setScaleDraw(self, scaleDraw):
self._scaleDraw = scaleDraw
def scaleDraw(self):
return self._scaleDraw
def divideScale(self, x1, x2, maxMajSteps, maxMinSteps, stepSize):
''' Reimplements Qwt5.QwtLinearScaleEngine.divideScale
**Important**: The stepSize parameter is **ignored**.
:return: (Qwt5.QwtScaleDiv) a scale division whose ticks are aligned with
the natural time units '''
# if stepSize != 0:
# scaleDiv = Qwt5.QwtLinearScaleEngine.divideScale(self, x1, x2, maxMajSteps, maxMinSteps, stepSize)
# scaleDiv.datetimeLabelFormat = "%Y/%m/%d %H:%M%S.%f"
# return scaleDiv
interval = Qwt5.QwtDoubleInterval(x1, x2).normalized()
if interval.width() <= 0:
return Qwt5.QwtScaleDiv()
dt1 = datetime.fromtimestamp(interval.minValue())
dt2 = datetime.fromtimestamp(interval.maxValue())
if dt1.year < 1900 or dt2.year > 9999: # limits in time.mktime and datetime
return Qwt5.QwtScaleDiv()
majticks = []
medticks = []
minticks = []
dx = interval.width()
# = 3600s*24*(365+366) = 2 years (counting a leap year)
if dx > 63072001:
format = "%Y"
for y in range(dt1.year + 1, dt2.year):
dt = datetime(year=y, month=1, day=1)
majticks.append(mktime(dt.timetuple()))
elif dx > 5270400: # = 3600s*24*61 = 61 days
format = "%Y %b"
d = timedelta(days=31)
dt = dt1.replace(day=1, hour=0, minute=0,
second=0, microsecond=0) + d
while(dt < dt2):
# make sure that we are on day 1 (even if always sum 31 days)
dt = dt.replace(day=1)
majticks.append(mktime(dt.timetuple()))
dt += d
        elif dx > 172800:  # 3600s*24*2 = 2 days
format = "%b/%d"
d = timedelta(days=1)
dt = dt1.replace(hour=0, minute=0, second=0, microsecond=0) + d
while(dt < dt2):
majticks.append(mktime(dt.timetuple()))
dt += d
elif dx > 7200: # 3600s*2 = 2hours
format = "%b/%d-%Hh"
d = timedelta(hours=1)
dt = dt1.replace(minute=0, second=0, microsecond=0) + d
while(dt < dt2):
majticks.append(mktime(dt.timetuple()))
dt += d
elif dx > 1200: # 60s*20 =20 minutes
format = "%H:%M"
d = timedelta(minutes=10)
dt = dt1.replace(minute=(dt1.minute // 10) * 10,
second=0, microsecond=0) + d
while(dt < dt2):
majticks.append(mktime(dt.timetuple()))
dt += d
elif dx > 120: # =60s*2 = 2 minutes
format = "%H:%M"
d = timedelta(minutes=1)
dt = dt1.replace(second=0, microsecond=0) + d
while(dt < dt2):
majticks.append(mktime(dt.timetuple()))
dt += d
elif dx > 20: # 20 s
format = "%H:%M:%S"
d = timedelta(seconds=10)
dt = dt1.replace(second=(dt1.second // 10) * 10, microsecond=0) + d
while(dt < dt2):
majticks.append(mktime(dt.timetuple()))
dt += d
elif dx > 2: # 2s
format = "%H:%M:%S"
majticks = list(range(int(x1) + 1, int(x2)))
else: # less than 2s (show microseconds)
scaleDiv = Qwt5.QwtLinearScaleEngine.divideScale(
self, x1, x2, maxMajSteps, maxMinSteps, stepSize)
self.scaleDraw().setDatetimeLabelFormat("%S.%f")
return scaleDiv
        # make sure to comply with maxMajSteps
L = len(majticks)
if L > maxMajSteps:
majticks = majticks[::int(numpy.ceil(float(L) / maxMajSteps))]
scaleDiv = Qwt5.QwtScaleDiv(interval, minticks, medticks, majticks)
self.scaleDraw().setDatetimeLabelFormat(format)
if x1 > x2:
scaleDiv.invert()
# START DEBUG
# print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
# for tk in scaleDiv.ticks(scaleDiv.MajorTick):
# print datetime.fromtimestamp(tk).isoformat()
# print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
# END DEBUG
return scaleDiv
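    # Illustrative note: for a span of roughly 3 hours (dx = 10800 s) the branch above picks
    # one-hour major ticks labelled with the "%b/%d-%Hh" format.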
@staticmethod
def getDefaultAxisLabelsAlignment(axis, rotation):
'''return a "smart" alignment for the axis labels depending on the axis
and the label rotation
:param axis: (Qwt5.QwtPlot.Axis) the axis
:param rotation: (float) The rotation (in degrees, clockwise-positive)
:return: (Qt.Alignment) an alignment
'''
return _getDefaultAxisLabelsAlignment(axis, rotation)
@staticmethod
def enableInAxis(plot, axis, scaleDraw=None, rotation=None):
'''convenience method that will enable this engine in the given
axis. Note that it changes the ScaleDraw as well.
:param plot: (Qwt5.QwtPlot) the plot to change
:param axis: (Qwt5.QwtPlot.Axis) the id of the axis
:param scaleDraw: (Qwt5.QwtScaleDraw) Scale draw to use. If None given,
the current ScaleDraw for the plot will be used if
possible, and a :class:`TaurusTimeScaleDraw` will be set if not
:param rotation: (float or None) The rotation of the labels (in degrees, clockwise-positive)
'''
if scaleDraw is None:
scaleDraw = plot.axisScaleDraw(axis)
if not isinstance(scaleDraw, TaurusTimeScaleDraw):
scaleDraw = TaurusTimeScaleDraw()
plot.setAxisScaleDraw(axis, scaleDraw)
plot.setAxisScaleEngine(axis, DateTimeScaleEngine(scaleDraw))
if rotation is not None:
alignment = DateTimeScaleEngine.getDefaultAxisLabelsAlignment(
axis, rotation)
plot.setAxisLabelRotation(axis, rotation)
plot.setAxisLabelAlignment(axis, alignment)
@staticmethod
def disableInAxis(plot, axis, scaleDraw=None, scaleEngine=None):
'''convenience method that will disable this engine in the given
axis. Note that it changes the ScaleDraw as well.
:param plot: (Qwt5.QwtPlot) the plot to change
:param axis: (Qwt5.QwtPlot.Axis) the id of the axis
:param scaleDraw: (Qwt5.QwtScaleDraw) Scale draw to use. If None given,
a :class:`FancyScaleDraw` will be set
:param scaleEngine: (Qwt5.QwtScaleEngine) Scale draw to use. If None given,
a :class:`Qwt5.QwtLinearScaleEngine` will be set
'''
if scaleDraw is None:
scaleDraw = FancyScaleDraw()
if scaleEngine is None:
scaleEngine = Qwt5.QwtLinearScaleEngine()
plot.setAxisScaleEngine(axis, scaleEngine)
plot.setAxisScaleDraw(axis, scaleDraw)
class TaurusTimeScaleDraw(FancyScaleDraw):
def __init__(self, *args):
FancyScaleDraw.__init__(self, *args)
def setDatetimeLabelFormat(self, format):
self._datetimeLabelFormat = format
def datetimeLabelFormat(self):
return self._datetimeLabelFormat
def label(self, val):
if str(self._labelFormat) == "":
return Qwt5.QwtText()
# From val to a string with time
t = datetime.fromtimestamp(val)
try: # If the scaleDiv was created by a DateTimeScaleEngine it has a _datetimeLabelFormat
s = t.strftime(self._datetimeLabelFormat)
except AttributeError:
print("Warning: cannot get the datetime label format (Are you using a DateTimeScaleEngine?)")
s = t.isoformat(' ')
return Qwt5.QwtText(s)
class DeltaTimeScaleEngine(Qwt5.QwtLinearScaleEngine):
def __init__(self, scaleDraw=None):
Qwt5.QwtLinearScaleEngine.__init__(self)
self.setScaleDraw(scaleDraw)
def setScaleDraw(self, scaleDraw):
self._scaleDraw = scaleDraw
def scaleDraw(self):
return self._scaleDraw
def divideScale(self, x1, x2, maxMajSteps, maxMinSteps, stepSize):
''' Reimplements Qwt5.QwtLinearScaleEngine.divideScale
:return: (Qwt5.QwtScaleDiv) a scale division whose ticks are aligned with
the natural delta time units '''
interval = Qwt5.QwtDoubleInterval(x1, x2).normalized()
if interval.width() <= 0:
return Qwt5.QwtScaleDiv()
d_range = interval.width()
if d_range < 2: # 2s
return Qwt5.QwtLinearScaleEngine.divideScale(self, x1, x2, maxMajSteps, maxMinSteps, stepSize)
elif d_range < 20: # 20 s
s = 1
elif d_range < 120: # =60s*2 = 2 minutes
s = 10
elif d_range < 1200: # 60s*20 =20 minutes
s = 60
elif d_range < 7200: # 3600s*2 = 2 hours
s = 600
        elif d_range < 172800:  # 3600s*24*2 = 2 days
s = 3600
else:
s = 86400 # 1 day
# calculate a step size that respects the base step (s) and also
# enforces the maxMajSteps
stepSize = s * int(numpy.ceil(float(d_range // s) / maxMajSteps))
return Qwt5.QwtLinearScaleEngine.divideScale(self, x1, x2, maxMajSteps, maxMinSteps, stepSize)
@staticmethod
def getDefaultAxisLabelsAlignment(axis, rotation):
'''return a "smart" alignment for the axis labels depending on the axis
and the label rotation
:param axis: (Qwt5.QwtPlot.Axis) the axis
:param rotation: (float) The rotation (in degrees, clockwise-positive)
:return: (Qt.Alignment) an alignment
'''
return _getDefaultAxisLabelsAlignment(axis, rotation)
@staticmethod
def enableInAxis(plot, axis, scaleDraw=None, rotation=None):
'''convenience method that will enable this engine in the given
axis. Note that it changes the ScaleDraw as well.
:param plot: (Qwt5.QwtPlot) the plot to change
:param axis: (Qwt5.QwtPlot.Axis) the id of the axis
:param scaleDraw: (Qwt5.QwtScaleDraw) Scale draw to use. If None given,
the current ScaleDraw for the plot will be used if
possible, and a :class:`TaurusTimeScaleDraw` will be set if not
:param rotation: (float or None) The rotation of the labels (in degrees, clockwise-positive)
'''
if scaleDraw is None:
scaleDraw = plot.axisScaleDraw(axis)
if not isinstance(scaleDraw, DeltaTimeScaleDraw):
scaleDraw = DeltaTimeScaleDraw()
plot.setAxisScaleDraw(axis, scaleDraw)
plot.setAxisScaleEngine(axis, DeltaTimeScaleEngine(scaleDraw))
if rotation is not None:
alignment = DeltaTimeScaleEngine.getDefaultAxisLabelsAlignment(
axis, rotation)
plot.setAxisLabelRotation(axis, rotation)
plot.setAxisLabelAlignment(axis, alignment)
@staticmethod
def disableInAxis(plot, axis, scaleDraw=None, scaleEngine=None):
'''convenience method that will disable this engine in the given
axis. Note that it changes the ScaleDraw as well.
:param plot: (Qwt5.QwtPlot) the plot to change
:param axis: (Qwt5.QwtPlot.Axis) the id of the axis
:param scaleDraw: (Qwt5.QwtScaleDraw) Scale draw to use. If None given,
a :class:`FancyScaleDraw` will be set
        :param scaleEngine: (Qwt5.QwtScaleEngine) Scale engine to use. If None given,
a :class:`Qwt5.QwtLinearScaleEngine` will be set
'''
if scaleDraw is None:
scaleDraw = FancyScaleDraw()
if scaleEngine is None:
scaleEngine = Qwt5.QwtLinearScaleEngine()
plot.setAxisScaleEngine(axis, scaleEngine)
plot.setAxisScaleDraw(axis, scaleDraw)
class DeltaTimeScaleDraw(FancyScaleDraw):
def __init__(self, *args):
FancyScaleDraw.__init__(self, *args)
def label(self, val):
if val >= 0:
s = "+%s" % str(timedelta(seconds=val))
else:
s = "-%s" % str(timedelta(seconds=-val))
return Qwt5.QwtText(s)
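# Hypothetical usage sketch (not part of the original module): shows how the
# DeltaTimeScaleEngine/DeltaTimeScaleDraw pair might be switched on and off for a
# plot whose x values are seconds relative to a reference time. Assumes the
# module's Qwt5 import and a Qt application owning `plot`.
def _example_enable_delta_axis(plot):
    # labels become "+H:MM:SS" deltas, rotated 45 degrees with a matching alignment
    DeltaTimeScaleEngine.enableInAxis(plot, Qwt5.QwtPlot.xBottom, rotation=-45)

def _example_restore_linear_axis(plot):
    # back to a plain linear scale with the default FancyScaleDraw
    DeltaTimeScaleEngine.disableInAxis(plot, Qwt5.QwtPlot.xBottom)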
class FixedLabelsScaleEngine(Qwt5.QwtLinearScaleEngine):
def __init__(self, positions):
        '''positions is a sequence of axis values at which major ticks (and the
        labels provided by a FixedLabelsScaleDraw) should be drawn'''
Qwt5.QwtScaleEngine.__init__(self)
self._positions = positions
# self.setAttribute(self.Floating,True)
def divideScale(self, x1, x2, maxMajSteps, maxMinSteps, stepSize=0.0):
div = Qwt5.QwtScaleDiv(x1, x2, self._positions, [], [])
div.setTicks(Qwt5.QwtScaleDiv.MajorTick, self._positions)
return div
@staticmethod
def enableInAxis(plot, axis, scaleDraw=None):
'''convenience method that will enable this engine in the given
axis. Note that it changes the ScaleDraw as well.
:param plot: (Qwt5.QwtPlot) the plot to change
:param axis: (Qwt5.QwtPlot.Axis) the id of the axis
        :param scaleDraw: (Qwt5.QwtScaleDraw) Scale draw to use. If None given,
                          the current ScaleDraw for the plot will be used if it is
                          a :class:`FixedLabelsScaleDraw` (it cannot be created
                          automatically here because it needs positions and labels)
        '''
        if scaleDraw is None:
            scaleDraw = plot.axisScaleDraw(axis)
        if not isinstance(scaleDraw, FixedLabelsScaleDraw):
            raise ValueError('a FixedLabelsScaleDraw must be passed or already set on the axis')
        plot.setAxisScaleDraw(axis, scaleDraw)
        # the engine only needs the tick positions, which the scale draw already stores
        plot.setAxisScaleEngine(axis, FixedLabelsScaleEngine(scaleDraw._positions))
@staticmethod
def disableInAxis(plot, axis, scaleDraw=None, scaleEngine=None):
'''convenience method that will disable this engine in the given
axis. Note that it changes the ScaleDraw as well.
:param plot: (Qwt5.QwtPlot) the plot to change
:param axis: (Qwt5.QwtPlot.Axis) the id of the axis
:param scaleDraw: (Qwt5.QwtScaleDraw) Scale draw to use. If None given,
a :class:`FancyScaleDraw` will be set
        :param scaleEngine: (Qwt5.QwtScaleEngine) Scale engine to use. If None given,
a :class:`Qwt5.QwtLinearScaleEngine` will be set
'''
if scaleDraw is None:
scaleDraw = FancyScaleDraw()
if scaleEngine is None:
scaleEngine = Qwt5.QwtLinearScaleEngine()
plot.setAxisScaleEngine(axis, scaleEngine)
plot.setAxisScaleDraw(axis, scaleDraw)
class FixedLabelsScaleDraw(FancyScaleDraw):
def __init__(self, positions, labels):
'''This is a custom ScaleDraw that shows labels at given positions (and nowhere else)
positions is a sequence of points for which labels are defined.
labels is a sequence strings (or QwtText)
Note that the lengths of positions and labels must match'''
if len(positions) != len(labels):
raise ValueError('lengths of positions and labels do not match')
FancyScaleDraw.__init__(self)
self._positions = positions
self._labels = labels
# self._positionsarray = numpy.array(self._positions) #this is stored
# just in case
    def label(self, val):
        try:
            index = self._positions.index(val)  # try to find an exact match
        except ValueError:
            index = None  # no label will be shown for this value
            # alternative: use the index of the closest position
            # index = (numpy.abs(self._positionsarray - val)).argmin()
        if index is not None:
            return Qwt5.QwtText(self._labels[index])
        else:
            return Qwt5.QwtText()
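# Hypothetical usage sketch (not part of the original module): the
# FixedLabelsScaleDraw/FixedLabelsScaleEngine pair shows labels only at the given
# positions and nowhere else. Assumes the module's Qwt5 import and an existing
# QwtPlot instance; positions and labels below are illustrative.
def _example_fixed_labels_axis(plot):
    positions = [0.0, 50.0, 100.0]   # axis values that get a tick and a label
    labels = ["low", "mid", "high"]  # one label per position
    plot.setAxisScaleDraw(Qwt5.QwtPlot.yLeft, FixedLabelsScaleDraw(positions, labels))
    plot.setAxisScaleEngine(Qwt5.QwtPlot.yLeft, FixedLabelsScaleEngine(positions))
    plot.replot()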
|
import requests
from bs4 import BeautifulSoup
from services.trackers.exceptions import UnknownPrice
def get_price() -> float:
    link = 'https://www.tgju.org/profile/mesghal'
    response = requests.get(link)
    if response.status_code != 200:
        raise UnknownPrice()
    soup = BeautifulSoup(response.content, features='html.parser')
    # the current price sits in the second cell of the first row of the first table
    table = soup.find('table')
    tbody = table.find('tbody') if table is not None else None
    rows = tbody.find_all('tr') if tbody is not None else []
    if not rows:
        raise UnknownPrice()
    current_price = rows[0].find_all('td')[1].text
    # strip thousands separators before converting; keep the original /10 scaling
    return int(current_price.replace(',', '')) / 10
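# Hypothetical usage sketch (not part of the original module): get_price() either
# returns the scraped value or raises UnknownPrice, so callers should treat the
# exception as "no quote available right now".
def _example_print_price():
    try:
        print(f"mesghal: {get_price():,.0f}")
    except UnknownPrice:
        print("mesghal: price unavailable")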
|
from pathlib import Path
from _utils.misc import (
cmd_exec,
)
from conf.conf import IMAGES_MOUNT_BASE_PATH
class MemDisk:
def __init__(self, image_path, unit):
self.image_path = image_path
self.unit = unit
self.md_path = f"/dev/md{self.unit}"
self.mount_path = Path(IMAGES_MOUNT_BASE_PATH) / str(self.unit)
def __del__(self):
self.unmount()
def __delete(self):
_, stderr = cmd_exec(f"sudo mdconfig -d -u {self.unit}")
if stderr:
raise Exception(f"error deleting memdisk: {stderr}")
def __create(self, replace=True):
stdout, stderr = cmd_exec(f"sudo mdconfig -a -t vnode -f {self.image_path} -u {self.unit}")
if stderr:
if replace:
self.unmount()
self.__create(replace=False)
return
raise Exception(f"error creating memdisk: {stderr}")
    def mount(self, fs_type="cd9660"):
        self.__create()
        if self.mount_path.exists():
            # drop any stale mount, but keep the md device that was just attached
            self.unmount(delete=False)
        else:
            _, stderr = cmd_exec(f"sudo mkdir -p {self.mount_path}")
            if stderr:
                raise Exception(f"error creating mount path: {stderr}")
        _, stderr = cmd_exec(f"sudo mount -t {fs_type} {self.md_path} {self.mount_path}")
        if stderr:
            raise Exception(f"error mounting memdisk: {stderr}")
        return self.mount_path

    def unmount(self, delete=True):
        if self.mount_path.exists():
            # a failing umount is tolerated here: the path may simply not be mounted
            _, stderr = cmd_exec(f"sudo umount -f {self.mount_path}")
            if delete:
                _, stderr = cmd_exec(f"sudo rm -rf {self.mount_path}")
                if stderr:
                    raise Exception(f"error removing mount path: {stderr}")
        if delete:
            self.__delete()
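# Hypothetical usage sketch (not part of the original module): attaches an
# (illustrative) ISO image as md unit 0, lists its contents and detaches it again.
# cmd_exec shells out with sudo, so this needs the same privileges as the class.
def _example_inspect_iso(image_path="/tmp/example.iso"):
    disk = MemDisk(image_path, unit=0)
    mount_path = disk.mount(fs_type="cd9660")
    try:
        return sorted(entry.name for entry in mount_path.iterdir())
    finally:
        disk.unmount()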
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/monitoring_v3/proto/service.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import (
monitored_resource_pb2 as google_dot_api_dot_monitored__resource__pb2,
)
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.type import calendar_period_pb2 as google_dot_type_dot_calendar__period__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/monitoring_v3/proto/service.proto",
package="google.monitoring.v3",
syntax="proto3",
serialized_options=_b(
"\n\030com.google.monitoring.v3B\026ServiceMonitoringProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3"
),
serialized_pb=_b(
'\n.google/cloud/monitoring_v3/proto/service.proto\x12\x14google.monitoring.v3\x1a#google/api/monitored_resource.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a!google/type/calendar_period.proto"\xd6\x04\n\x07Service\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x36\n\x06\x63ustom\x18\x06 \x01(\x0b\x32$.google.monitoring.v3.Service.CustomH\x00\x12=\n\napp_engine\x18\x07 \x01(\x0b\x32\'.google.monitoring.v3.Service.AppEngineH\x00\x12G\n\x0f\x63loud_endpoints\x18\x08 \x01(\x0b\x32,.google.monitoring.v3.Service.CloudEndpointsH\x00\x12\x43\n\rcluster_istio\x18\t \x01(\x0b\x32*.google.monitoring.v3.Service.ClusterIstioH\x00\x12:\n\ttelemetry\x18\r \x01(\x0b\x32\'.google.monitoring.v3.Service.Telemetry\x1a\x08\n\x06\x43ustom\x1a\x1e\n\tAppEngine\x12\x11\n\tmodule_id\x18\x01 \x01(\t\x1a!\n\x0e\x43loudEndpoints\x12\x0f\n\x07service\x18\x01 \x01(\t\x1ag\n\x0c\x43lusterIstio\x12\x10\n\x08location\x18\x01 \x01(\t\x12\x14\n\x0c\x63luster_name\x18\x02 \x01(\t\x12\x19\n\x11service_namespace\x18\x03 \x01(\t\x12\x14\n\x0cservice_name\x18\x04 \x01(\t\x1a"\n\tTelemetry\x12\x15\n\rresource_name\x18\x01 \x01(\tB\x0c\n\nidentifier"\xc4\x02\n\x15ServiceLevelObjective\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x0b \x01(\t\x12L\n\x17service_level_indicator\x18\x03 \x01(\x0b\x32+.google.monitoring.v3.ServiceLevelIndicator\x12\x0c\n\x04goal\x18\x04 \x01(\x01\x12\x33\n\x0erolling_period\x18\x05 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x36\n\x0f\x63\x61lendar_period\x18\x06 \x01(\x0e\x32\x1b.google.type.CalendarPeriodH\x00"4\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\x08\n\x04\x46ULL\x10\x02\x12\x0c\n\x08\x45XPLICIT\x10\x01\x42\x08\n\x06period"\xd4\x01\n\x15ServiceLevelIndicator\x12\x33\n\tbasic_sli\x18\x04 \x01(\x0b\x32\x1e.google.monitoring.v3.BasicSliH\x00\x12>\n\rrequest_based\x18\x01 \x01(\x0b\x32%.google.monitoring.v3.RequestBasedSliH\x00\x12>\n\rwindows_based\x18\x02 \x01(\x0b\x32%.google.monitoring.v3.WindowsBasedSliH\x00\x42\x06\n\x04type"\xb6\x02\n\x08\x42\x61sicSli\x12\x0e\n\x06method\x18\x07 \x03(\t\x12\x10\n\x08location\x18\x08 \x03(\t\x12\x0f\n\x07version\x18\t \x03(\t\x12K\n\x0c\x61vailability\x18\x02 \x01(\x0b\x32\x33.google.monitoring.v3.BasicSli.AvailabilityCriteriaH\x00\x12\x41\n\x07latency\x18\x03 \x01(\x0b\x32..google.monitoring.v3.BasicSli.LatencyCriteriaH\x00\x1a\x16\n\x14\x41vailabilityCriteria\x1a?\n\x0fLatencyCriteria\x12,\n\tthreshold\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationB\x0e\n\x0csli_criteria"!\n\x05Range\x12\x0b\n\x03min\x18\x01 \x01(\x01\x12\x0b\n\x03max\x18\x02 \x01(\x01"\xa1\x01\n\x0fRequestBasedSli\x12\x41\n\x10good_total_ratio\x18\x01 \x01(\x0b\x32%.google.monitoring.v3.TimeSeriesRatioH\x00\x12\x41\n\x10\x64istribution_cut\x18\x03 \x01(\x0b\x32%.google.monitoring.v3.DistributionCutH\x00\x42\x08\n\x06method"h\n\x0fTimeSeriesRatio\x12\x1b\n\x13good_service_filter\x18\x04 \x01(\t\x12\x1a\n\x12\x62\x61\x64_service_filter\x18\x05 \x01(\t\x12\x1c\n\x14total_service_filter\x18\x06 \x01(\t"Z\n\x0f\x44istributionCut\x12\x1b\n\x13\x64istribution_filter\x18\x04 \x01(\t\x12*\n\x05range\x18\x05 \x01(\x0b\x32\x1b.google.monitoring.v3.Range"\x83\x05\n\x0fWindowsBasedSli\x12 \n\x16good_bad_metric_filter\x18\x05 \x01(\tH\x00\x12`\n\x1agood_total_ratio_threshold\x18\x02 \x01(\x0b\x32:.google.monitoring.v3.WindowsBasedSli.PerformanceThresholdH\x00\x12Q\n\x14metric_mean_in_range\x18\x06 
\x01(\x0b\x32\x31.google.monitoring.v3.WindowsBasedSli.MetricRangeH\x00\x12P\n\x13metric_sum_in_range\x18\x07 \x01(\x0b\x32\x31.google.monitoring.v3.WindowsBasedSli.MetricRangeH\x00\x12\x30\n\rwindow_period\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x1a\xb0\x01\n\x14PerformanceThreshold\x12<\n\x0bperformance\x18\x01 \x01(\x0b\x32%.google.monitoring.v3.RequestBasedSliH\x00\x12?\n\x15\x62\x61sic_sli_performance\x18\x03 \x01(\x0b\x32\x1e.google.monitoring.v3.BasicSliH\x00\x12\x11\n\tthreshold\x18\x02 \x01(\x01\x42\x06\n\x04type\x1aN\n\x0bMetricRange\x12\x13\n\x0btime_series\x18\x01 \x01(\t\x12*\n\x05range\x18\x04 \x01(\x0b\x32\x1b.google.monitoring.v3.RangeB\x12\n\x10window_criterionB\xae\x01\n\x18\x63om.google.monitoring.v3B\x16ServiceMonitoringProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3b\x06proto3'
),
dependencies=[
google_dot_api_dot_monitored__resource__pb2.DESCRIPTOR,
google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_type_dot_calendar__period__pb2.DESCRIPTOR,
],
)
_SERVICELEVELOBJECTIVE_VIEW = _descriptor.EnumDescriptor(
name="View",
full_name="google.monitoring.v3.ServiceLevelObjective.View",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="VIEW_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="FULL", index=1, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="EXPLICIT", index=2, number=1, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=1073,
serialized_end=1125,
)
_sym_db.RegisterEnumDescriptor(_SERVICELEVELOBJECTIVE_VIEW)
_SERVICE_CUSTOM = _descriptor.Descriptor(
name="Custom",
full_name="google.monitoring.v3.Service.Custom",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=578,
serialized_end=586,
)
_SERVICE_APPENGINE = _descriptor.Descriptor(
name="AppEngine",
full_name="google.monitoring.v3.Service.AppEngine",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="module_id",
full_name="google.monitoring.v3.Service.AppEngine.module_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=588,
serialized_end=618,
)
_SERVICE_CLOUDENDPOINTS = _descriptor.Descriptor(
name="CloudEndpoints",
full_name="google.monitoring.v3.Service.CloudEndpoints",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="service",
full_name="google.monitoring.v3.Service.CloudEndpoints.service",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=620,
serialized_end=653,
)
_SERVICE_CLUSTERISTIO = _descriptor.Descriptor(
name="ClusterIstio",
full_name="google.monitoring.v3.Service.ClusterIstio",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="location",
full_name="google.monitoring.v3.Service.ClusterIstio.location",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="cluster_name",
full_name="google.monitoring.v3.Service.ClusterIstio.cluster_name",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="service_namespace",
full_name="google.monitoring.v3.Service.ClusterIstio.service_namespace",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="service_name",
full_name="google.monitoring.v3.Service.ClusterIstio.service_name",
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=655,
serialized_end=758,
)
_SERVICE_TELEMETRY = _descriptor.Descriptor(
name="Telemetry",
full_name="google.monitoring.v3.Service.Telemetry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="resource_name",
full_name="google.monitoring.v3.Service.Telemetry.resource_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=760,
serialized_end=794,
)
_SERVICE = _descriptor.Descriptor(
name="Service",
full_name="google.monitoring.v3.Service",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.monitoring.v3.Service.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="display_name",
full_name="google.monitoring.v3.Service.display_name",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="custom",
full_name="google.monitoring.v3.Service.custom",
index=2,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="app_engine",
full_name="google.monitoring.v3.Service.app_engine",
index=3,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="cloud_endpoints",
full_name="google.monitoring.v3.Service.cloud_endpoints",
index=4,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="cluster_istio",
full_name="google.monitoring.v3.Service.cluster_istio",
index=5,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="telemetry",
full_name="google.monitoring.v3.Service.telemetry",
index=6,
number=13,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[
_SERVICE_CUSTOM,
_SERVICE_APPENGINE,
_SERVICE_CLOUDENDPOINTS,
_SERVICE_CLUSTERISTIO,
_SERVICE_TELEMETRY,
],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="identifier",
full_name="google.monitoring.v3.Service.identifier",
index=0,
containing_type=None,
fields=[],
),
],
serialized_start=210,
serialized_end=808,
)
_SERVICELEVELOBJECTIVE = _descriptor.Descriptor(
name="ServiceLevelObjective",
full_name="google.monitoring.v3.ServiceLevelObjective",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.monitoring.v3.ServiceLevelObjective.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="display_name",
full_name="google.monitoring.v3.ServiceLevelObjective.display_name",
index=1,
number=11,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="service_level_indicator",
full_name="google.monitoring.v3.ServiceLevelObjective.service_level_indicator",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="goal",
full_name="google.monitoring.v3.ServiceLevelObjective.goal",
index=3,
number=4,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="rolling_period",
full_name="google.monitoring.v3.ServiceLevelObjective.rolling_period",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="calendar_period",
full_name="google.monitoring.v3.ServiceLevelObjective.calendar_period",
index=5,
number=6,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_SERVICELEVELOBJECTIVE_VIEW,],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="period",
full_name="google.monitoring.v3.ServiceLevelObjective.period",
index=0,
containing_type=None,
fields=[],
),
],
serialized_start=811,
serialized_end=1135,
)
_SERVICELEVELINDICATOR = _descriptor.Descriptor(
name="ServiceLevelIndicator",
full_name="google.monitoring.v3.ServiceLevelIndicator",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="basic_sli",
full_name="google.monitoring.v3.ServiceLevelIndicator.basic_sli",
index=0,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="request_based",
full_name="google.monitoring.v3.ServiceLevelIndicator.request_based",
index=1,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="windows_based",
full_name="google.monitoring.v3.ServiceLevelIndicator.windows_based",
index=2,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="type",
full_name="google.monitoring.v3.ServiceLevelIndicator.type",
index=0,
containing_type=None,
fields=[],
),
],
serialized_start=1138,
serialized_end=1350,
)
_BASICSLI_AVAILABILITYCRITERIA = _descriptor.Descriptor(
name="AvailabilityCriteria",
full_name="google.monitoring.v3.BasicSli.AvailabilityCriteria",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1560,
serialized_end=1582,
)
_BASICSLI_LATENCYCRITERIA = _descriptor.Descriptor(
name="LatencyCriteria",
full_name="google.monitoring.v3.BasicSli.LatencyCriteria",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="threshold",
full_name="google.monitoring.v3.BasicSli.LatencyCriteria.threshold",
index=0,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1584,
serialized_end=1647,
)
_BASICSLI = _descriptor.Descriptor(
name="BasicSli",
full_name="google.monitoring.v3.BasicSli",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="method",
full_name="google.monitoring.v3.BasicSli.method",
index=0,
number=7,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="location",
full_name="google.monitoring.v3.BasicSli.location",
index=1,
number=8,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="version",
full_name="google.monitoring.v3.BasicSli.version",
index=2,
number=9,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="availability",
full_name="google.monitoring.v3.BasicSli.availability",
index=3,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="latency",
full_name="google.monitoring.v3.BasicSli.latency",
index=4,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_BASICSLI_AVAILABILITYCRITERIA, _BASICSLI_LATENCYCRITERIA,],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="sli_criteria",
full_name="google.monitoring.v3.BasicSli.sli_criteria",
index=0,
containing_type=None,
fields=[],
),
],
serialized_start=1353,
serialized_end=1663,
)
_RANGE = _descriptor.Descriptor(
name="Range",
full_name="google.monitoring.v3.Range",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="min",
full_name="google.monitoring.v3.Range.min",
index=0,
number=1,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max",
full_name="google.monitoring.v3.Range.max",
index=1,
number=2,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1665,
serialized_end=1698,
)
_REQUESTBASEDSLI = _descriptor.Descriptor(
name="RequestBasedSli",
full_name="google.monitoring.v3.RequestBasedSli",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="good_total_ratio",
full_name="google.monitoring.v3.RequestBasedSli.good_total_ratio",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="distribution_cut",
full_name="google.monitoring.v3.RequestBasedSli.distribution_cut",
index=1,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="method",
full_name="google.monitoring.v3.RequestBasedSli.method",
index=0,
containing_type=None,
fields=[],
),
],
serialized_start=1701,
serialized_end=1862,
)
_TIMESERIESRATIO = _descriptor.Descriptor(
name="TimeSeriesRatio",
full_name="google.monitoring.v3.TimeSeriesRatio",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="good_service_filter",
full_name="google.monitoring.v3.TimeSeriesRatio.good_service_filter",
index=0,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="bad_service_filter",
full_name="google.monitoring.v3.TimeSeriesRatio.bad_service_filter",
index=1,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="total_service_filter",
full_name="google.monitoring.v3.TimeSeriesRatio.total_service_filter",
index=2,
number=6,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1864,
serialized_end=1968,
)
_DISTRIBUTIONCUT = _descriptor.Descriptor(
name="DistributionCut",
full_name="google.monitoring.v3.DistributionCut",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="distribution_filter",
full_name="google.monitoring.v3.DistributionCut.distribution_filter",
index=0,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="range",
full_name="google.monitoring.v3.DistributionCut.range",
index=1,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1970,
serialized_end=2060,
)
_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD = _descriptor.Descriptor(
name="PerformanceThreshold",
full_name="google.monitoring.v3.WindowsBasedSli.PerformanceThreshold",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="performance",
full_name="google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.performance",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="basic_sli_performance",
full_name="google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.basic_sli_performance",
index=1,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="threshold",
full_name="google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.threshold",
index=2,
number=2,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="type",
full_name="google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.type",
index=0,
containing_type=None,
fields=[],
),
],
serialized_start=2430,
serialized_end=2606,
)
_WINDOWSBASEDSLI_METRICRANGE = _descriptor.Descriptor(
name="MetricRange",
full_name="google.monitoring.v3.WindowsBasedSli.MetricRange",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="time_series",
full_name="google.monitoring.v3.WindowsBasedSli.MetricRange.time_series",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="range",
full_name="google.monitoring.v3.WindowsBasedSli.MetricRange.range",
index=1,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2608,
serialized_end=2686,
)
_WINDOWSBASEDSLI = _descriptor.Descriptor(
name="WindowsBasedSli",
full_name="google.monitoring.v3.WindowsBasedSli",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="good_bad_metric_filter",
full_name="google.monitoring.v3.WindowsBasedSli.good_bad_metric_filter",
index=0,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="good_total_ratio_threshold",
full_name="google.monitoring.v3.WindowsBasedSli.good_total_ratio_threshold",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="metric_mean_in_range",
full_name="google.monitoring.v3.WindowsBasedSli.metric_mean_in_range",
index=2,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="metric_sum_in_range",
full_name="google.monitoring.v3.WindowsBasedSli.metric_sum_in_range",
index=3,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="window_period",
full_name="google.monitoring.v3.WindowsBasedSli.window_period",
index=4,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD, _WINDOWSBASEDSLI_METRICRANGE,],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="window_criterion",
full_name="google.monitoring.v3.WindowsBasedSli.window_criterion",
index=0,
containing_type=None,
fields=[],
),
],
serialized_start=2063,
serialized_end=2706,
)
_SERVICE_CUSTOM.containing_type = _SERVICE
_SERVICE_APPENGINE.containing_type = _SERVICE
_SERVICE_CLOUDENDPOINTS.containing_type = _SERVICE
_SERVICE_CLUSTERISTIO.containing_type = _SERVICE
_SERVICE_TELEMETRY.containing_type = _SERVICE
_SERVICE.fields_by_name["custom"].message_type = _SERVICE_CUSTOM
_SERVICE.fields_by_name["app_engine"].message_type = _SERVICE_APPENGINE
_SERVICE.fields_by_name["cloud_endpoints"].message_type = _SERVICE_CLOUDENDPOINTS
_SERVICE.fields_by_name["cluster_istio"].message_type = _SERVICE_CLUSTERISTIO
_SERVICE.fields_by_name["telemetry"].message_type = _SERVICE_TELEMETRY
_SERVICE.oneofs_by_name["identifier"].fields.append(_SERVICE.fields_by_name["custom"])
_SERVICE.fields_by_name["custom"].containing_oneof = _SERVICE.oneofs_by_name[
"identifier"
]
_SERVICE.oneofs_by_name["identifier"].fields.append(
_SERVICE.fields_by_name["app_engine"]
)
_SERVICE.fields_by_name["app_engine"].containing_oneof = _SERVICE.oneofs_by_name[
"identifier"
]
_SERVICE.oneofs_by_name["identifier"].fields.append(
_SERVICE.fields_by_name["cloud_endpoints"]
)
_SERVICE.fields_by_name["cloud_endpoints"].containing_oneof = _SERVICE.oneofs_by_name[
"identifier"
]
_SERVICE.oneofs_by_name["identifier"].fields.append(
_SERVICE.fields_by_name["cluster_istio"]
)
_SERVICE.fields_by_name["cluster_istio"].containing_oneof = _SERVICE.oneofs_by_name[
"identifier"
]
_SERVICELEVELOBJECTIVE.fields_by_name[
"service_level_indicator"
].message_type = _SERVICELEVELINDICATOR
_SERVICELEVELOBJECTIVE.fields_by_name[
"rolling_period"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_SERVICELEVELOBJECTIVE.fields_by_name[
"calendar_period"
].enum_type = google_dot_type_dot_calendar__period__pb2._CALENDARPERIOD
_SERVICELEVELOBJECTIVE_VIEW.containing_type = _SERVICELEVELOBJECTIVE
_SERVICELEVELOBJECTIVE.oneofs_by_name["period"].fields.append(
_SERVICELEVELOBJECTIVE.fields_by_name["rolling_period"]
)
_SERVICELEVELOBJECTIVE.fields_by_name[
"rolling_period"
].containing_oneof = _SERVICELEVELOBJECTIVE.oneofs_by_name["period"]
_SERVICELEVELOBJECTIVE.oneofs_by_name["period"].fields.append(
_SERVICELEVELOBJECTIVE.fields_by_name["calendar_period"]
)
_SERVICELEVELOBJECTIVE.fields_by_name[
"calendar_period"
].containing_oneof = _SERVICELEVELOBJECTIVE.oneofs_by_name["period"]
_SERVICELEVELINDICATOR.fields_by_name["basic_sli"].message_type = _BASICSLI
_SERVICELEVELINDICATOR.fields_by_name["request_based"].message_type = _REQUESTBASEDSLI
_SERVICELEVELINDICATOR.fields_by_name["windows_based"].message_type = _WINDOWSBASEDSLI
_SERVICELEVELINDICATOR.oneofs_by_name["type"].fields.append(
_SERVICELEVELINDICATOR.fields_by_name["basic_sli"]
)
_SERVICELEVELINDICATOR.fields_by_name[
"basic_sli"
].containing_oneof = _SERVICELEVELINDICATOR.oneofs_by_name["type"]
_SERVICELEVELINDICATOR.oneofs_by_name["type"].fields.append(
_SERVICELEVELINDICATOR.fields_by_name["request_based"]
)
_SERVICELEVELINDICATOR.fields_by_name[
"request_based"
].containing_oneof = _SERVICELEVELINDICATOR.oneofs_by_name["type"]
_SERVICELEVELINDICATOR.oneofs_by_name["type"].fields.append(
_SERVICELEVELINDICATOR.fields_by_name["windows_based"]
)
_SERVICELEVELINDICATOR.fields_by_name[
"windows_based"
].containing_oneof = _SERVICELEVELINDICATOR.oneofs_by_name["type"]
_BASICSLI_AVAILABILITYCRITERIA.containing_type = _BASICSLI
_BASICSLI_LATENCYCRITERIA.fields_by_name[
"threshold"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_BASICSLI_LATENCYCRITERIA.containing_type = _BASICSLI
_BASICSLI.fields_by_name["availability"].message_type = _BASICSLI_AVAILABILITYCRITERIA
_BASICSLI.fields_by_name["latency"].message_type = _BASICSLI_LATENCYCRITERIA
_BASICSLI.oneofs_by_name["sli_criteria"].fields.append(
_BASICSLI.fields_by_name["availability"]
)
_BASICSLI.fields_by_name["availability"].containing_oneof = _BASICSLI.oneofs_by_name[
"sli_criteria"
]
_BASICSLI.oneofs_by_name["sli_criteria"].fields.append(
_BASICSLI.fields_by_name["latency"]
)
_BASICSLI.fields_by_name["latency"].containing_oneof = _BASICSLI.oneofs_by_name[
"sli_criteria"
]
_REQUESTBASEDSLI.fields_by_name["good_total_ratio"].message_type = _TIMESERIESRATIO
_REQUESTBASEDSLI.fields_by_name["distribution_cut"].message_type = _DISTRIBUTIONCUT
_REQUESTBASEDSLI.oneofs_by_name["method"].fields.append(
_REQUESTBASEDSLI.fields_by_name["good_total_ratio"]
)
_REQUESTBASEDSLI.fields_by_name[
"good_total_ratio"
].containing_oneof = _REQUESTBASEDSLI.oneofs_by_name["method"]
_REQUESTBASEDSLI.oneofs_by_name["method"].fields.append(
_REQUESTBASEDSLI.fields_by_name["distribution_cut"]
)
_REQUESTBASEDSLI.fields_by_name[
"distribution_cut"
].containing_oneof = _REQUESTBASEDSLI.oneofs_by_name["method"]
_DISTRIBUTIONCUT.fields_by_name["range"].message_type = _RANGE
_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.fields_by_name[
"performance"
].message_type = _REQUESTBASEDSLI
_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.fields_by_name[
"basic_sli_performance"
].message_type = _BASICSLI
_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.containing_type = _WINDOWSBASEDSLI
_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.oneofs_by_name["type"].fields.append(
_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.fields_by_name["performance"]
)
_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.fields_by_name[
"performance"
].containing_oneof = _WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.oneofs_by_name["type"]
_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.oneofs_by_name["type"].fields.append(
_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.fields_by_name["basic_sli_performance"]
)
_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.fields_by_name[
"basic_sli_performance"
].containing_oneof = _WINDOWSBASEDSLI_PERFORMANCETHRESHOLD.oneofs_by_name["type"]
_WINDOWSBASEDSLI_METRICRANGE.fields_by_name["range"].message_type = _RANGE
_WINDOWSBASEDSLI_METRICRANGE.containing_type = _WINDOWSBASEDSLI
_WINDOWSBASEDSLI.fields_by_name[
"good_total_ratio_threshold"
].message_type = _WINDOWSBASEDSLI_PERFORMANCETHRESHOLD
_WINDOWSBASEDSLI.fields_by_name[
"metric_mean_in_range"
].message_type = _WINDOWSBASEDSLI_METRICRANGE
_WINDOWSBASEDSLI.fields_by_name[
"metric_sum_in_range"
].message_type = _WINDOWSBASEDSLI_METRICRANGE
_WINDOWSBASEDSLI.fields_by_name[
"window_period"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_WINDOWSBASEDSLI.oneofs_by_name["window_criterion"].fields.append(
_WINDOWSBASEDSLI.fields_by_name["good_bad_metric_filter"]
)
_WINDOWSBASEDSLI.fields_by_name[
"good_bad_metric_filter"
].containing_oneof = _WINDOWSBASEDSLI.oneofs_by_name["window_criterion"]
_WINDOWSBASEDSLI.oneofs_by_name["window_criterion"].fields.append(
_WINDOWSBASEDSLI.fields_by_name["good_total_ratio_threshold"]
)
_WINDOWSBASEDSLI.fields_by_name[
"good_total_ratio_threshold"
].containing_oneof = _WINDOWSBASEDSLI.oneofs_by_name["window_criterion"]
_WINDOWSBASEDSLI.oneofs_by_name["window_criterion"].fields.append(
_WINDOWSBASEDSLI.fields_by_name["metric_mean_in_range"]
)
_WINDOWSBASEDSLI.fields_by_name[
"metric_mean_in_range"
].containing_oneof = _WINDOWSBASEDSLI.oneofs_by_name["window_criterion"]
_WINDOWSBASEDSLI.oneofs_by_name["window_criterion"].fields.append(
_WINDOWSBASEDSLI.fields_by_name["metric_sum_in_range"]
)
_WINDOWSBASEDSLI.fields_by_name[
"metric_sum_in_range"
].containing_oneof = _WINDOWSBASEDSLI.oneofs_by_name["window_criterion"]
DESCRIPTOR.message_types_by_name["Service"] = _SERVICE
DESCRIPTOR.message_types_by_name["ServiceLevelObjective"] = _SERVICELEVELOBJECTIVE
DESCRIPTOR.message_types_by_name["ServiceLevelIndicator"] = _SERVICELEVELINDICATOR
DESCRIPTOR.message_types_by_name["BasicSli"] = _BASICSLI
DESCRIPTOR.message_types_by_name["Range"] = _RANGE
DESCRIPTOR.message_types_by_name["RequestBasedSli"] = _REQUESTBASEDSLI
DESCRIPTOR.message_types_by_name["TimeSeriesRatio"] = _TIMESERIESRATIO
DESCRIPTOR.message_types_by_name["DistributionCut"] = _DISTRIBUTIONCUT
DESCRIPTOR.message_types_by_name["WindowsBasedSli"] = _WINDOWSBASEDSLI
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Service = _reflection.GeneratedProtocolMessageType(
"Service",
(_message.Message,),
dict(
Custom=_reflection.GeneratedProtocolMessageType(
"Custom",
(_message.Message,),
dict(
DESCRIPTOR=_SERVICE_CUSTOM,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""Custom view of service telemetry. Currently a place-holder pending final
design.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.Service.Custom)
),
),
AppEngine=_reflection.GeneratedProtocolMessageType(
"AppEngine",
(_message.Message,),
dict(
DESCRIPTOR=_SERVICE_APPENGINE,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""App Engine service. Learn more at https://cloud.google.com/appengine.
Attributes:
module_id:
The ID of the App Engine module underlying this service.
Corresponds to the ``module_id`` resource label in the
``gae_app`` monitored resource: https://cloud.google.com/monit
oring/api/resources#tag\_gae\_app
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.Service.AppEngine)
),
),
CloudEndpoints=_reflection.GeneratedProtocolMessageType(
"CloudEndpoints",
(_message.Message,),
dict(
DESCRIPTOR=_SERVICE_CLOUDENDPOINTS,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""Cloud Endpoints service. Learn more at
https://cloud.google.com/endpoints.
Attributes:
service:
The name of the Cloud Endpoints service underlying this
service. Corresponds to the ``service`` resource label in the
``api`` monitored resource:
https://cloud.google.com/monitoring/api/resources#tag\_api
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.Service.CloudEndpoints)
),
),
ClusterIstio=_reflection.GeneratedProtocolMessageType(
"ClusterIstio",
(_message.Message,),
dict(
DESCRIPTOR=_SERVICE_CLUSTERISTIO,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""Istio service. Learn more at http://istio.io.
Attributes:
location:
The location of the Kubernetes cluster in which this Istio
service is defined. Corresponds to the ``location`` resource
label in ``k8s_cluster`` resources.
cluster_name:
The name of the Kubernetes cluster in which this Istio service
is defined. Corresponds to the ``cluster_name`` resource label
in ``k8s_cluster`` resources.
service_namespace:
The namespace of the Istio service underlying this service.
Corresponds to the ``destination_service_namespace`` metric
label in Istio metrics.
service_name:
The name of the Istio service underlying this service.
Corresponds to the ``destination_service_name`` metric label
in Istio metrics.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.Service.ClusterIstio)
),
),
Telemetry=_reflection.GeneratedProtocolMessageType(
"Telemetry",
(_message.Message,),
dict(
DESCRIPTOR=_SERVICE_TELEMETRY,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""Configuration for how to query telemetry on a Service.
Attributes:
resource_name:
The full name of the resource that defines this service.
Formatted as described in
https://cloud.google.com/apis/design/resource\_names.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.Service.Telemetry)
),
),
DESCRIPTOR=_SERVICE,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""A ``Service`` is a discrete, autonomous, and network-accessible unit,
designed to solve an individual concern
(`Wikipedia <https://en.wikipedia.org/wiki/Service-orientation>`__). In
Stackdriver Monitoring, a ``Service`` acts as the root resource under
which operational aspects of the service are accessible.
Attributes:
name:
Resource name for this Service. Of the form
``projects/{project_id}/services/{service_id}``.
display_name:
Name used for UI elements listing this Service.
identifier:
REQUIRED. Service-identifying atoms specifying the underlying
service.
custom:
Custom service type.
app_engine:
Type used for App Engine services.
cloud_endpoints:
Type used for Cloud Endpoints services.
cluster_istio:
Type used for Istio services that live in a Kubernetes
cluster.
telemetry:
Configuration for how to query telemetry on a Service.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.Service)
),
)
_sym_db.RegisterMessage(Service)
_sym_db.RegisterMessage(Service.Custom)
_sym_db.RegisterMessage(Service.AppEngine)
_sym_db.RegisterMessage(Service.CloudEndpoints)
_sym_db.RegisterMessage(Service.ClusterIstio)
_sym_db.RegisterMessage(Service.Telemetry)
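# Illustrative usage sketch (not part of the generated file): builds an App Engine
# backed Service message; the project, service and module names are hypothetical.
def _example_service():
    return Service(
        name="projects/example-project/services/example-service",
        display_name="Example checkout service",
        app_engine=Service.AppEngine(module_id="default"),
    )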
ServiceLevelObjective = _reflection.GeneratedProtocolMessageType(
"ServiceLevelObjective",
(_message.Message,),
dict(
DESCRIPTOR=_SERVICELEVELOBJECTIVE,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""A Service-Level Objective (SLO) describes a level of desired good
service. It consists of a service-level indicator (SLI), a performance
goal, and a period over which the objective is to be evaluated against
that goal. The SLO can use SLIs defined in a number of different
manners. Typical SLOs might include "99% of requests in each rolling
week have latency below 200 milliseconds" or "99.5% of requests in each
calendar month return successfully."
Attributes:
name:
Resource name for this ``ServiceLevelObjective``. Of the form
``projects/{project_id}/services/{service_id}/serviceLevelObje
ctives/{slo_name}``.
display_name:
Name used for UI elements listing this SLO.
service_level_indicator:
The definition of good service, used to measure and calculate
the quality of the ``Service``'s performance with respect to a
single aspect of service quality.
goal:
The fraction of service that must be good in order for this
objective to be met. ``0 < goal <= 1``.
period:
The time period over which the objective will be evaluated.
rolling_period:
A rolling time period, semantically "in the past
``<rolling_period>``". Must be an integer multiple of 1 day no
larger than 30 days.
calendar_period:
A calendar period, semantically "since the start of the
current ``<calendar_period>``". At this time, only ``DAY``,
``WEEK``, ``FORTNIGHT``, and ``MONTH`` are supported.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.ServiceLevelObjective)
),
)
_sym_db.RegisterMessage(ServiceLevelObjective)
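# Illustrative usage sketch (not part of the generated file): an objective asking
# for 99% good service over a rolling 7-day window. The resource name is a
# hypothetical placeholder and the service_level_indicator is left unset here.
def _example_slo():
    from google.protobuf import duration_pb2
    return ServiceLevelObjective(
        name="projects/example-project/services/example-service/serviceLevelObjectives/example-slo",
        display_name="99% over 7 days",
        goal=0.99,
        rolling_period=duration_pb2.Duration(seconds=7 * 24 * 3600),
    )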
ServiceLevelIndicator = _reflection.GeneratedProtocolMessageType(
"ServiceLevelIndicator",
(_message.Message,),
dict(
DESCRIPTOR=_SERVICELEVELINDICATOR,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""A Service-Level Indicator (SLI) describes the "performance" of a
service. For some services, the SLI is well-defined. In such cases, the
SLI can be described easily by referencing the well-known SLI and
providing the needed parameters. Alternatively, a "custom" SLI can be
defined with a query to the underlying metric store. An SLI is defined
to be ``good_service / total_service`` over any queried time interval.
The value of performance always falls into the range
``0 <= performance <= 1``. A custom SLI describes how to compute this
ratio, whether this is by dividing values from a pair of time series,
cutting a ``Distribution`` into good and bad counts, or counting time
windows in which the service complies with a criterion. For separation
of concerns, a single Service-Level Indicator measures performance for
only one aspect of service quality, such as fraction of successful
queries or fast-enough queries.
Attributes:
type:
Service level indicators can be grouped by whether the "unit"
of service being measured is based on counts of good requests
or on counts of good time windows
basic_sli:
Basic SLI on a well-known service type.
request_based:
Request-based SLIs
windows_based:
Windows-based SLIs
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.ServiceLevelIndicator)
),
)
_sym_db.RegisterMessage(ServiceLevelIndicator)
BasicSli = _reflection.GeneratedProtocolMessageType(
"BasicSli",
(_message.Message,),
dict(
AvailabilityCriteria=_reflection.GeneratedProtocolMessageType(
"AvailabilityCriteria",
(_message.Message,),
dict(
DESCRIPTOR=_BASICSLI_AVAILABILITYCRITERIA,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""Future parameters for the availability SLI.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.BasicSli.AvailabilityCriteria)
),
),
LatencyCriteria=_reflection.GeneratedProtocolMessageType(
"LatencyCriteria",
(_message.Message,),
dict(
DESCRIPTOR=_BASICSLI_LATENCYCRITERIA,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""Parameters for a latency threshold SLI.
Attributes:
threshold:
Good service is defined to be the count of requests made to
this service that return in no more than ``threshold``.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.BasicSli.LatencyCriteria)
),
),
DESCRIPTOR=_BASICSLI,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""An SLI measuring performance on a well-known service type. Performance
will be computed on the basis of pre-defined metrics. The type of the
``service_resource`` determines the metrics to use and the
``service_resource.labels`` and ``metric_labels`` are used to construct
a monitoring filter to filter that metric down to just the data relevant
to this service.
Attributes:
method:
OPTIONAL: The set of RPCs to which this SLI is relevant.
Telemetry from other methods will not be used to calculate
performance for this SLI. If omitted, this SLI applies to all
the Service's methods. For service types that don't support
breaking down by method, setting this field will result in an
error.
location:
OPTIONAL: The set of locations to which this SLI is relevant.
Telemetry from other locations will not be used to calculate
performance for this SLI. If omitted, this SLI applies to all
locations in which the Service has activity. For service types
that don't support breaking down by location, setting this
field will result in an error.
version:
OPTIONAL: The set of API versions to which this SLI is
relevant. Telemetry from other API versions will not be used
to calculate performance for this SLI. If omitted, this SLI
applies to all API versions. For service types that don't
support breaking down by version, setting this field will
result in an error.
sli_criteria:
This SLI can be evaluated on the basis of availability or
latency.
availability:
Good service is defined to be the count of requests made to
this service that return successfully.
latency:
Good service is defined to be the count of requests made to
this service that are fast enough with respect to
``latency.threshold``.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.BasicSli)
),
)
_sym_db.RegisterMessage(BasicSli)
_sym_db.RegisterMessage(BasicSli.AvailabilityCriteria)
_sym_db.RegisterMessage(BasicSli.LatencyCriteria)
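# Illustrative usage sketch (not part of the generated file): a BasicSli that
# counts requests answered within 500 ms as good service.
def _example_latency_sli():
    from google.protobuf import duration_pb2
    return BasicSli(
        latency=BasicSli.LatencyCriteria(
            threshold=duration_pb2.Duration(nanos=500 * 1000 * 1000)
        )
    )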
Range = _reflection.GeneratedProtocolMessageType(
"Range",
(_message.Message,),
dict(
DESCRIPTOR=_RANGE,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""Range of numerical values, inclusive of ``min`` and exclusive of
``max``. If the open range "< range.max" is desired, set
``range.min = -infinity``. If the open range ">= range.min" is desired,
set ``range.max = infinity``.
Attributes:
min:
Range minimum.
max:
Range maximum.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.Range)
),
)
_sym_db.RegisterMessage(Range)
RequestBasedSli = _reflection.GeneratedProtocolMessageType(
"RequestBasedSli",
(_message.Message,),
dict(
DESCRIPTOR=_REQUESTBASEDSLI,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""Service Level Indicators for which atomic units of service are counted
directly.
Attributes:
method:
The means to compute a ratio of ``good_service`` to
``total_service``.
good_total_ratio:
``good_total_ratio`` is used when the ratio of
``good_service`` to ``total_service`` is computed from two
``TimeSeries``.
distribution_cut:
``distribution_cut`` is used when ``good_service`` is a count
of values aggregated in a ``Distribution`` that fall into a
good range. The ``total_service`` is the total count of all
values aggregated in the ``Distribution``.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.RequestBasedSli)
),
)
_sym_db.RegisterMessage(RequestBasedSli)
TimeSeriesRatio = _reflection.GeneratedProtocolMessageType(
"TimeSeriesRatio",
(_message.Message,),
dict(
DESCRIPTOR=_TIMESERIESRATIO,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""A ``TimeSeriesRatio`` specifies two ``TimeSeries`` to use for computing
the ``good_service / total_service`` ratio. The specified ``TimeSeries``
must have ``ValueType = DOUBLE`` or ``ValueType = INT64`` and must have
``MetricKind = DELTA`` or ``MetricKind = CUMULATIVE``. The
``TimeSeriesRatio`` must specify exactly two of good, bad, and total,
and the relationship ``good_service + bad_service = total_service`` will
be assumed.
Attributes:
good_service_filter:
A `monitoring filter
<https://cloud.google.com/monitoring/api/v3/filters>`__
specifying a ``TimeSeries`` quantifying good service provided.
Must have ``ValueType = DOUBLE`` or ``ValueType = INT64`` and
must have ``MetricKind = DELTA`` or ``MetricKind =
CUMULATIVE``.
bad_service_filter:
A `monitoring filter
<https://cloud.google.com/monitoring/api/v3/filters>`__
specifying a ``TimeSeries`` quantifying bad service, either
demanded service that was not provided or demanded service
that was of inadequate quality. Must have ``ValueType =
DOUBLE`` or ``ValueType = INT64`` and must have ``MetricKind =
DELTA`` or ``MetricKind = CUMULATIVE``.
total_service_filter:
A `monitoring filter
<https://cloud.google.com/monitoring/api/v3/filters>`__
specifying a ``TimeSeries`` quantifying total demanded
service. Must have ``ValueType = DOUBLE`` or ``ValueType =
INT64`` and must have ``MetricKind = DELTA`` or ``MetricKind =
CUMULATIVE``.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.TimeSeriesRatio)
),
)
_sym_db.RegisterMessage(TimeSeriesRatio)
DistributionCut = _reflection.GeneratedProtocolMessageType(
"DistributionCut",
(_message.Message,),
dict(
DESCRIPTOR=_DISTRIBUTIONCUT,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""A ``DistributionCut`` defines a ``TimeSeries`` and thresholds used for
measuring good service and total service. The ``TimeSeries`` must have
``ValueType = DISTRIBUTION`` and ``MetricKind = DELTA`` or
``MetricKind = CUMULATIVE``. The computed ``good_service`` will be the
count of values x in the ``Distribution`` such that
``range.min <= x < range.max``.
Attributes:
distribution_filter:
A `monitoring filter
<https://cloud.google.com/monitoring/api/v3/filters>`__
specifying a ``TimeSeries`` aggregating values. Must have
``ValueType = DISTRIBUTION`` and ``MetricKind = DELTA`` or
``MetricKind = CUMULATIVE``.
range:
Range of values considered "good." For a one-sided range, set
one bound to an infinite value.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.DistributionCut)
),
)
_sym_db.RegisterMessage(DistributionCut)
WindowsBasedSli = _reflection.GeneratedProtocolMessageType(
"WindowsBasedSli",
(_message.Message,),
dict(
PerformanceThreshold=_reflection.GeneratedProtocolMessageType(
"PerformanceThreshold",
(_message.Message,),
dict(
DESCRIPTOR=_WINDOWSBASEDSLI_PERFORMANCETHRESHOLD,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""A ``PerformanceThreshold`` is used when each window is good when that
window has a sufficiently high ``performance``.
Attributes:
type:
The means, either a request-based SLI or a basic SLI, by which
to compute performance over a window.
performance:
``RequestBasedSli`` to evaluate to judge window quality.
basic_sli_performance:
``BasicSli`` to evaluate to judge window quality.
threshold:
If window ``performance >= threshold``, the window is counted
as good.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.WindowsBasedSli.PerformanceThreshold)
),
),
MetricRange=_reflection.GeneratedProtocolMessageType(
"MetricRange",
(_message.Message,),
dict(
DESCRIPTOR=_WINDOWSBASEDSLI_METRICRANGE,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""A ``MetricRange`` is used when each window is good when the value x of a
single ``TimeSeries`` satisfies ``range.min <= x < range.max``. The
provided ``TimeSeries`` must have ``ValueType = INT64`` or
``ValueType = DOUBLE`` and ``MetricKind = GAUGE``.
Attributes:
time_series:
A `monitoring filter
<https://cloud.google.com/monitoring/api/v3/filters>`__
specifying the ``TimeSeries`` to use for evaluating window
quality.
range:
Range of values considered "good." For a one-sided range, set
one bound to an infinite value.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.WindowsBasedSli.MetricRange)
),
),
DESCRIPTOR=_WINDOWSBASEDSLI,
__module__="google.cloud.monitoring_v3.proto.service_pb2",
__doc__="""A ``WindowsBasedSli`` defines ``good_service`` as the count of time
windows for which the provided service was of good quality. Criteria for
determining if service was good are embedded in the
``window_criterion``.
Attributes:
window_criterion:
The criterion to use for evaluating window goodness.
good_bad_metric_filter:
A `monitoring filter
<https://cloud.google.com/monitoring/api/v3/filters>`__
specifying a ``TimeSeries`` with ``ValueType = BOOL``. The
window is good if any ``true`` values appear in the window.
good_total_ratio_threshold:
A window is good if its ``performance`` is high enough.
metric_mean_in_range:
A window is good if the metric's value is in a good range,
averaged across returned streams.
metric_sum_in_range:
A window is good if the metric's value is in a good range,
summed across returned streams.
window_period:
Duration over which window quality is evaluated. Must be an
integer fraction of a day and at least ``60s``.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.WindowsBasedSli)
),
)
_sym_db.RegisterMessage(WindowsBasedSli)
_sym_db.RegisterMessage(WindowsBasedSli.PerformanceThreshold)
_sym_db.RegisterMessage(WindowsBasedSli.MetricRange)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
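# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated code): the message classes
# registered above accept their fields as constructor keyword arguments. The
# field names below (request_based, good_total_ratio, good_service_filter,
# total_service_filter) are taken from the docstrings in this module; the
# monitoring filter strings are placeholders, not real metric filters.
#
#   from google.cloud.monitoring_v3.proto import service_pb2
#
#   sli = service_pb2.ServiceLevelIndicator(
#       request_based=service_pb2.RequestBasedSli(
#           good_total_ratio=service_pb2.TimeSeriesRatio(
#               good_service_filter='metric.type="..." resource.type="..."',
#               total_service_filter='metric.type="..." resource.type="..."',
#           )
#       )
#   )
# ---------------------------------------------------------------------------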
|
class DataCenter(object):
    """Thin wrapper around historical data that exposes heat readings by time step."""
    def __init__(self, history_data):
        # history_data is expected to be a mapping containing a 'HEAT' sequence
        self.heat_list = history_data['HEAT']
    def get_heat(self, t):
        # Return the heat value recorded at time step t
        return self.heat_list[t]
|
# coding: utf-8
from PIL import Image
import os
import numpy as np
import cv2
''' Sometimes the boxes file has been removed accidentally,
but the original images have been saved. We can compare
the two versions (with/without mosaic) of the images to
recover the bounding boxes.
'''
# Step 1: find the images that have no bounding box
files = ['akari_01_boxes.txt', 'ai_uehara_01_boxes.txt']
files_have_boxes = {}
for boxes_file in files:
    with open(boxes_file, 'r') as fobj:
        fs = [line.strip().split(":")[0].strip() for line in fobj]
    fs = [os.path.basename(f) for f in fs]
    for f in fs:
        files_have_boxes[f] = 1
os.makedirs('diff_images', exist_ok=True)
f_without_box = [f for f in os.listdir("mosaic_images/") if f not in files_have_boxes]
# Step 2: compare the images and find the bounding box for each file that has none
boxes = {}
for f in f_without_box:
raw_img = Image.open(os.path.join('raw_images', f))
mosaic_img = Image.open(os.path.join('mosaic_images', f))
raw_img = np.asarray(raw_img)
mosaic_img = np.asarray(mosaic_img)
    # uint8 subtraction wraps around, but any differing pixel is still nonzero,
    # which is all we need to locate the mosaic region
    diff_img = mosaic_img - raw_img
    diff_y, diff_x = np.nonzero(diff_img[:, :, 0])
first_non_zero = (diff_x.min(), diff_y.min())
last_non_zero = (diff_x.max(), diff_y.max())
try:
box = [first_non_zero[0], first_non_zero[1],\
last_non_zero[0]-first_non_zero[0], \
last_non_zero[1] -first_non_zero[1]]
#print(box)
boxes[f] = box
cv2.rectangle(diff_img, first_non_zero, last_non_zero, (255, 0,0), 2)
diff_img = Image.fromarray(diff_img, 'RGB')
diff_img.save(os.path.join('diff_images', f))
    except Exception as e:
        print(e)
continue
# Step 3: save the bounding boxes to a file
fobj = open('akari_01_boxes.txt.missed', 'w')
for f in boxes:
line = "./data/anote_videos/mosaic_images/{} : {} : 0".format(f, tuple(boxes[f]))
fobj.writelines(line + os.linesep)
fobj.close()
|
from __future__ import print_function
import sys
import os
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torch.autograd import Variable
from data import JE_ROOT, JE_CLASSES, JEDetection, BaseTransform
from PIL import Image
import torch.utils.data as data
from ssd import build_ssd
import cv2
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')
parser.add_argument('--trained_model', default='/home/waiyang/crowd_counting/repulsion_loss_ssd/weights/ssd_300_VOC0712.pth',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='/home/waiyang/crowd_counting/repulsion_loss_ssd/eval', type=str,
help='Dir to save results')
parser.add_argument('--visual_threshold', default=0.55, type=float,
help='Final confidence threshold')
parser.add_argument('--cuda', default=True, type=bool,
help='Use cuda to train model')
parser.add_argument('--je_root', default=JE_ROOT, help='Location of JE dataset root directory')
parser.add_argument('-f', default=None, type=str, help="Dummy arg so we can load in Jupyter Notebooks")
args = parser.parse_args()
if args.cuda and torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
VOC_CLASSES = ( # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
labelmap=VOC_CLASSES
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
FONT = cv2.FONT_HERSHEY_SIMPLEX
def test_net(save_folder, net, cuda, testset, transform, thresh,testset_name):
pd_filename = os.path.join(save_folder,testset_name+'_pred.txt')
test_output_root='/home/waiyang/crowd_counting/repulsion_loss_ssd/test_output2'
num_images = len(testset)
for i in range(num_images):
print('Testing image {:d}/{:d}....'.format(i+1, num_images))
img,img_id = testset.pull_image(i)
frame = img
output_img_path = os.path.join(test_output_root, testset_name, img_id[1] + "_new.jpg")
x = torch.from_numpy(transform(img)).permute(2, 0, 1)
x = Variable(x.unsqueeze(0))
if cuda:
x = x.cuda()
y = net(x) # forward pass
detections = y.data
# scale each detection back up to the image
scale = torch.Tensor([img.shape[1], img.shape[0],
img.shape[1], img.shape[0]])
pred_num = 0
for i in range(detections.size(1)):
j = 0
while detections[0, i, j, 0] >= thresh:
if pred_num == 0:
with open(pd_filename, mode='a') as f:
f.write(img_id[1]+' ')
score = detections[0, i, j, 0]
label_name = labelmap[i-1]
if label_name=='person':
pt = (detections[0, i, j, 1:]*scale).cpu().numpy()
coords = (pt[0], pt[1], pt[2], pt[3])
cv2.rectangle(frame,
(int(pt[0]), int(pt[1])),
(int(pt[2]), int(pt[3])),
COLORS[i % 3], 2)
pred_num += 1
print(label_name)
with open(pd_filename, mode='a') as f:
f.write(str(i-1) + ' ' + str(score) + ' ' +' '.join(str(c) for c in coords)+' ')
j += 1
with open(pd_filename, mode='a') as f:
f.write('\n')
cv2.imwrite(output_img_path,frame)
def test():
# load net
num_classes = len(VOC_CLASSES) + 1 # +1 background
net = build_ssd('test', 300, num_classes) # initialize SSD
net.load_state_dict(torch.load(args.trained_model))
net.eval()
print('Finished loading model!')
# load data
testset = JEDetection(args.je_root, ['IMM'])
if args.cuda:
net = net.cuda()
cudnn.benchmark = True
# evaluation
test_net(args.save_folder, net, args.cuda, testset,
BaseTransform(net.size, (104, 117, 123)),
thresh=args.visual_threshold,testset_name='IMM')
if __name__ == '__main__':
test()
|
class Message:
def __init__(self, messageType, message):
self.messageType = messageType
self.message = message
        # Any type other than "success" or "error" falls back to "success"
        if self.messageType != "success" and self.messageType != "error":
            self.messageType = "success"
def repr_json(self):
return dict(messageType=self.messageType, message=self.message)
|
from awacs import aws, sts
from awacs.aws import Allow, Principal, Statement
from troposphere import Base64, GetAtt, Join, Output, Parameter, Ref, Split, Template
from troposphere.autoscaling import AutoScalingGroup, LaunchConfiguration, Tag
from troposphere.ec2 import BlockDeviceMapping, EBSBlockDevice
from troposphere.iam import InstanceProfile, Role
class RunnerAsg:
def __init__(self, sceptre_user_data):
self.template = Template()
self.sceptre_user_data = sceptre_user_data
self.template.add_description("Runner ASG")
def add_parameters(self):
self.runner_subnets = self.template.add_parameter(
Parameter("RunnerSubnets", Description="runner_subnets", Type="String")
)
self.runner_security_group = self.template.add_parameter(
Parameter(
"RunnerSecurityGroup",
Description="runner_security_group",
Type="String",
)
)
self.runner_ami_id = self.template.add_parameter(
Parameter("RunnerAmiId", Description="runner_ami_id", Type="String")
)
self.runner_server_instance_type = self.template.add_parameter(
Parameter(
"RunnerServerInstanceType",
Description="runner_server_instance_type",
Type="String",
)
)
self.runner_key_pair = self.template.add_parameter(
Parameter("RunnerKeyPair", Description="runner_key_pair", Type="String")
)
self.runner_desired_count = self.template.add_parameter(
Parameter(
"RunnerDesiredCount", Description="runner_desired_count", Type="Number"
)
)
self.runner_min_count = self.template.add_parameter(
Parameter("RunnerMinCount", Description="runner_min_count", Type="Number")
)
self.runner_max_count = self.template.add_parameter(
Parameter("RunnerMaxCount", Description="runner_max_count", Type="Number")
)
self.runner_job_concurrency = self.template.add_parameter(
Parameter(
"RunnerJobConcurrency",
Description="runner_job_concurrency",
Type="Number",
)
)
self.runner_tag_list = self.template.add_parameter(
Parameter("RunnerTagList", Description="runner_tag_list", Type="String")
)
self.runner_register_token = self.template.add_parameter(
Parameter(
"RunnerRegisterToken",
Description="runner_register_token",
Type="String",
)
)
self.runner_gitlab_url = self.template.add_parameter(
Parameter("RunnerGitlabUrl", Description="runner_gitlab_url", Type="String")
)
self.runner_volume_size = self.template.add_parameter(
Parameter(
"RunnerVolumeSize", Description="runner_volume_size", Type="String"
)
)
self.runner_version = self.template.add_parameter(
Parameter("RunnerVersion", Description="runner_version", Type="String")
)
def add_resources(self):
self.runner_ssm_role = self.template.add_resource(
Role(
"RunnerSsmRole",
Path="/",
ManagedPolicyArns=[
"arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM"
],
AssumeRolePolicyDocument=aws.Policy(
Statement=[
Statement(
Action=[sts.AssumeRole],
Effect=Allow,
Principal=Principal("Service", ["ec2.amazonaws.com"]),
)
]
),
)
)
self.runner_ssm_instanceprofile = self.template.add_resource(
InstanceProfile(
"RunnerSsmInstanceProfile", Path="/", Roles=[Ref(self.runner_ssm_role)]
)
)
self.runner_launch_config = self.template.add_resource(
LaunchConfiguration(
"RunnerLaunchConfiguration",
UserData=Base64(
Join(
"",
[
"#!/bin/bash\n",
"#####install ssm######\n",
"yum install -y amazon-ssm-agent\n",
"systemctl enable amazon-ssm-agent\n",
"systemctl start amazon-ssm-agent\n",
"####install docker####\n",
"yum install -y docker\n",
"systemctl enable docker\n",
"systemctl start docker\n",
"####install runner####\n",
"yum install -y wget\n",
"wget -O /usr/local/bin/gitlab-runner ",
"https://gitlab-runner-downloads.s3.amazonaws.com/v",
Ref(self.runner_version),
"/binaries/gitlab-runner-linux-amd64\n",
"ln -s /usr/local/bin/gitlab-runner ",
"/usr/bin/gitlab-runner\n",
"chmod +x /usr/local/bin/gitlab-runner\n",
"useradd --comment 'GitLab Runner' ",
"--create-home gitlab-runner --shell /bin/bash\n",
"/usr/local/bin/gitlab-runner install ",
"--user=gitlab-runner "
"--working-directory=/home/gitlab-runner\n",
"systemctl enable gitlab-runner\n",
"systemctl start gitlab-runner\n",
"####register runner####\n",
"gitlab-runner register ",
"--config=/etc/gitlab-runner/config.toml ",
"--request-concurrency=",
Ref(self.runner_job_concurrency),
" ",
"--tag-list=",
Ref(self.runner_tag_list),
" ",
"--non-interactive ",
"--registration-token=",
Ref(self.runner_register_token),
" ",
"--run-untagged=true ",
"--locked=false ",
"--url=",
Ref(self.runner_gitlab_url),
" ",
"--executor=docker ",
"--docker-image=alpine:latest ",
"--docker-privileged=true\n",
"####create unregister script####\n",
"TOKEN=$(gitlab-runner list 2>&1 | grep Executor | ",
"awk '{ print $4 }' | awk -F= '{ print $2 }')\n",
"URL=$(gitlab-runner list 2>&1 | grep Executor | ",
"awk '{ print $5 }' | awk -F= '{ print $2 }')\n",
"echo gitlab-runner unregister ",
"--url $URL --token $TOKEN > /unregister.sh\n",
"chmod +x /unregister.sh",
],
)
),
ImageId=Ref(self.runner_ami_id),
KeyName=Ref(self.runner_key_pair),
BlockDeviceMappings=[
BlockDeviceMapping(
DeviceName="/dev/xvda",
Ebs=EBSBlockDevice(VolumeSize=Ref(self.runner_volume_size)),
)
],
SecurityGroups=[Ref(self.runner_security_group)],
InstanceType=Ref(self.runner_server_instance_type),
IamInstanceProfile=GetAtt(self.runner_ssm_instanceprofile, "Arn"),
)
)
self.runner_autoscaling_group = self.template.add_resource(
AutoScalingGroup(
"RunnerAutoscalingGroup",
DesiredCapacity=Ref(self.runner_desired_count),
LaunchConfigurationName=Ref(self.runner_launch_config),
MinSize=Ref(self.runner_min_count),
MaxSize=Ref(self.runner_max_count),
VPCZoneIdentifier=Split(",", Ref(self.runner_subnets)),
Tags=[Tag("Name", "gitlab-runner-created-by-asg", True)],
)
)
def add_outputs(self):
self.template.add_output(
[
Output(
"RunnerLaunchConfiguration", Value=Ref(self.runner_launch_config)
),
Output(
"RunnerAutoscalingGroup", Value=Ref(self.runner_autoscaling_group)
),
]
)
def sceptre_handler(sceptre_user_data):
_runner_asg = RunnerAsg(sceptre_user_data)
_runner_asg.add_parameters()
_runner_asg.add_resources()
_runner_asg.add_outputs()
return _runner_asg.template.to_json()
|
from brownie import Contract, interface
from brownie.exceptions import ContractNotFound
from cachetools.func import ttl_cache
from yearn.cache import memory
from yearn.multicall2 import fetch_multicall
from yearn.prices.constants import weth, usdc
FACTORIES = {
"uniswap": "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f",
"sushiswap": "0xC0AEe478e3658e2610c5F7A4A2E1777cE9e4f2Ac",
}
ROUTERS = {
"uniswap": "0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D",
"sushiswap": "0xD9E1CE17F2641F24AE83637AB66A2CCA9C378B9F",
}
FACTORY_TO_ROUTER = {FACTORIES[name]: name for name in FACTORIES}
@ttl_cache(ttl=600)
def get_price(token_in, token_out=usdc, router="uniswap", block=None):
"""
    Calculate a price based on a Uniswap Router quote for selling one `token_in`.
    Routes through an intermediate WETH pair unless one of the tokens is WETH.
"""
tokens = [Contract(str(token)) for token in [token_in, token_out]]
amount_in = 10 ** tokens[0].decimals()
path = [token_in, token_out] if weth in (token_in, token_out) else [token_in, weth, token_out]
fees = 0.997 ** (len(path) - 1)
if router in ROUTERS:
router = interface.UniswapRouter(ROUTERS[router])
try:
quote = router.getAmountsOut(amount_in, path, block_identifier=block)
amount_out = quote[-1] / 10 ** tokens[1].decimals()
return amount_out / fees
except ValueError:
pass
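# Worked example of the fee adjustment above (illustrative numbers, not real
# exchange output): a two-hop path [token_in, WETH, token_out] pays the 0.3%
# swap fee twice, so fees = 0.997 ** 2 = 0.994009. If the router quoted
# amount_out = 994.009 token_out for one token_in, get_price would report the
# fee-free price 994.009 / 0.994009 = 1000.0.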
@ttl_cache(ttl=600)
def get_price_v1(asset, block=None):
factory = Contract("0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95")
try:
asset = Contract(asset)
exchange = interface.UniswapV1Exchange(factory.getExchange(asset))
eth_bought = exchange.getTokenToEthInputPrice(10 ** asset.decimals(), block_identifier=block)
exchange = interface.UniswapV1Exchange(factory.getExchange(usdc))
usdc_bought = exchange.getEthToTokenInputPrice(eth_bought, block_identifier=block) / 1e6
fees = 0.997 ** 2
return usdc_bought / fees
except (ContractNotFound, ValueError) as e:
pass
@memory.cache()
def is_uniswap_pool(address):
try:
return Contract(address).factory() in FACTORY_TO_ROUTER
except (ValueError, OverflowError, AttributeError):
pass
return False
@ttl_cache(ttl=600)
def lp_price(address, block=None):
""" Get Uniswap/Sushiswap LP token price. """
pair = Contract(address)
factory, token0, token1, supply, reserves = fetch_multicall(
[pair, "factory"],
[pair, "token0"],
[pair, "token1"],
[pair, "totalSupply"],
[pair, "getReserves"],
block=block
)
router = FACTORY_TO_ROUTER[factory]
tokens = [Contract(token) for token in [token0, token1]]
scales = [10 ** token.decimals() for token in tokens]
prices = [get_price(token, router=router, block=block) for token in tokens]
supply = supply / 1e18
balances = [res / scale * price for res, scale, price in zip(reserves, scales, prices)]
return sum(balances) / supply
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import time
import reusables
import logging
import platform
from .common_test_data import *
reusables.change_logger_levels(logging.getLogger('reusables'), logging.INFO)
class ExampleSleepTasker(reusables.Tasker):
@staticmethod
def perform_task(task, queue):
time.sleep(task)
queue.put(task)
class ExampleAddTasker(reusables.Tasker):
@staticmethod
def perform_task(task, queue):
queue.put(task, task + task)
class TestTasker(BaseTestClass):
def test_example_add_tasker(self):
if reusables.win_based:
return
tasker = ExampleAddTasker(list(range(100)))
try:
tasker.run()
tasker.change_task_size(2)
tasker.change_task_size(6)
tasker.pause()
tasker.unpuase()
assert isinstance(tasker.get_state(), dict)
results = [tasker.result_queue.get() for _ in range(100)]
finally:
tasker.stop()
assert len(results) == 100
    def test_stop_at_empty(self):
tasker = ExampleSleepTasker([.1, .2])
tasker.main_loop(True)
assert [tasker.result_queue.get() for _ in (0, 0)] == [.1, .2]
def test_bad_size_change(self):
tasker = reusables.Tasker()
try:
tasker.perform_task(1, 2)
except NotImplementedError:
pass
else:
assert False
assert not tasker.change_task_size(-1)
assert not tasker.change_task_size('a')
assert tasker.change_task_size(2)
assert tasker.change_task_size(6)
tasker._reset_and_pause()
def test_tasker_commands(self):
import datetime
reusables.add_stream_handler("reusables")
tasker = ExampleAddTasker(max_tasks=4, run_until=datetime.datetime.now() + datetime.timedelta(minutes=1))
tasker.command_queue.put("change task size 1")
tasker.command_queue.put("pause")
tasker.command_queue.put("unpause")
tasker.command_queue.put("stop")
tasker.put(5)
tasker.main_loop()
r = tasker.get_state()
assert r['stopped'], r
assert tasker.max_tasks == 1, tasker.max_tasks
class TestPool(unittest.TestCase):
def test_run_in_pool(self):
def test(a, b=True):
return a, a * 2, b
res = reusables.run_in_pool(test, [1, 2, 3, 4])
assert res == [(1, 2, True), (2, 4, True), (3, 6, True), (4, 8, True)]
res2 = reusables.run_in_pool(test, [4, 6], target_kwargs={"b": False})
assert res2 == [(4, 8, False), (6, 12, False)]
|
import pandas as pd
from scipy.spatial import KDTree  # used by locate_nearest_neighbor_values below
def calcLatLon(northing, easting):
"""
This function converts northings/eastings to latitude and longitudes.
It is almost entirely based upon a function
written by Tom Neer found in November 2017 at his blog:
http://www.neercartography.com/convert-consus-albers-to-wgs84/
"""
from math import asin, atan2, cos, log, pow, sin, sqrt
    # CONUS Albers variables (EPSG: 5070)
RE_NAD83 = 6378137.0
E_NAD83 = 0.0818187034 # Eccentricity
D2R = 0.01745329251 # Pi/180
standardParallel1 = 43.
standardParallel2 = 47.
centralMeridian = -114.
originLat = 30
originLon = 0
m1 = cos(standardParallel1 * D2R) / \
sqrt(1.0 - pow((E_NAD83 * sin(standardParallel1 * D2R)), 2.0))
m2 = cos(standardParallel2 * D2R) / \
sqrt(1.0 - pow((E_NAD83 * sin(standardParallel2 * D2R)), 2.0))
def calcPhi(i):
sinPhi = sin(i * D2R)
return (1.0 - pow(E_NAD83, 2.0)) * \
((sinPhi/(1.0 - pow((E_NAD83 * sinPhi), 2.0))) -
1.0/(2.0 * E_NAD83) *
log((1.0 - E_NAD83 * sinPhi)/(1.0 + E_NAD83 * sinPhi)))
q0 = calcPhi(originLat)
q1 = calcPhi(standardParallel1)
q2 = calcPhi(standardParallel2)
nc = (pow(m1, 2.0) - pow(m2, 2.0)) / (q2 - q1)
C = pow(m1, 2.0) + nc * q1
rho0 = RE_NAD83 * sqrt(C - nc * q0) / nc
rho = sqrt(pow(easting, 2.0) + pow((rho0 - northing), 2.0))
q = (C - pow((rho * nc / RE_NAD83), 2.0)) / nc
beta = asin(q / (1.0 - log((1.0 - E_NAD83) / (1.0 + E_NAD83)) *
(1.0 - pow(E_NAD83, 2.0))/(2.0 * E_NAD83)))
a = 1.0 / 3.0 * pow(E_NAD83, 2.0) + 31.0 / 180.0 * \
pow(E_NAD83, 4.0) + 517.0 / 5040.0 * pow(E_NAD83, 6.0)
b = 23.0/360.0 * pow(E_NAD83, 4.0) + 251.0 / 3780.0 * pow(E_NAD83, 6.0)
c = 761.0/45360.0 * pow(E_NAD83, 6.0)
theta = atan2(easting, (rho0 - northing))
lat = (beta + a * sin(2.0 * beta) + b * sin(4.0 * beta) +
c * sin(6.0 * beta))/D2R
lon = centralMeridian + (theta / D2R) / nc
coords = [lat, lon]
return coords
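# Hedged usage sketch for calcLatLon: the northing/easting values below are
# placeholders (meters in EPSG:5070), and the result is not a verified
# coordinate -- the point is only the call shape and the [lat, lon] return.
#   lat, lon = calcLatLon(northing=2300000.0, easting=-1500000.0)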
def get_model_ts(infilename, na_values='-9999', comment='#',
rename_columns=None, column='streamflow'):
"""Retrieve modeled time series from ASCII file
Parameters
----------
infilename : str
Pathname for file
na_values : str, optional
Values that should be converted to `NA`. Default value is '-9999'
comment : str, optional
        Comment indicator at the start of the line. Default value is '#'
rename_columns: dict or None, optional
Dictionary to rename columns. Default value is None
column: str, optional
Name of the column that will be returned. Default value is 'streamflow'
Returns
-------
pandas.Series
Column from file as a pandas.Series
"""
ts = pd.read_csv(infilename, comment=comment, na_values=na_values,
index_col=0, parse_dates=True)
# renaming of columns may seem superfluous if we are converting to a
# Series anyway, but it allows all the Series to have the same name
if rename_columns:
ts.columns = [column]
return pd.Series(ts[column])
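# Hedged usage sketch for get_model_ts: 'model_output.csv' and its contents are
# hypothetical; the file just needs a parseable date index plus a 'streamflow' column.
#   flow = get_model_ts('model_output.csv', column='streamflow')
#   print(flow.head())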
def locate_nearest_neighbor_values(point, gdf, temperature_sites):
tree = KDTree(temperature_sites, leafsize=temperature_sites.shape[0]+1)
distances, ndx = tree.query([point], k=10)
nearest_neighbors_data = gdf.iloc[list(ndx[0])]
return nearest_neighbors_data
|
# Generated by Django 3.1.7 on 2021-04-13 06:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contenido',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('clave', models.CharField(max_length=64)),
('valor', models.TextField()),
],
),
migrations.CreateModel(
name='Comentario',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titulo', models.CharField(max_length=200)),
('cuerpo', models.TextField()),
('fecha', models.DateTimeField(verbose_name='publicado')),
('contenido', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cms_put.contenido')),
],
),
]
|
from storyscript.compiler.semantics.functions.HubMutations import Hub
def test_mutations_empty():
assert len(Hub('').mutations()) == 0
def test_mutations_comment():
assert len(Hub('#comment\n#another comment\n').mutations()) == 0
|
import argparse
from distutils.util import strtobool
import pathlib
import siml
import convert_raw_data
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'settings_yaml',
type=pathlib.Path,
help='YAML file name of settings.')
parser.add_argument(
'raw_data_directory',
type=pathlib.Path,
help='Raw data directory')
parser.add_argument(
'-p', '--preprocessors-pkl',
type=pathlib.Path,
default=None,
help='Preprocessors.pkl file')
parser.add_argument(
'-o', '--out-dir',
type=pathlib.Path,
default=None,
help='Output directory name')
parser.add_argument(
'-f', '--force-renew',
type=strtobool,
default=0,
help='If True, overwrite existing data [False]')
parser.add_argument(
'-l', '--light',
type=strtobool,
default=0,
help='If True, compute minimum required data only [False]')
parser.add_argument(
'-n', '--read-npy',
type=strtobool,
default=1,
help='If True, read .npy files instead of original files '
'if exists [True]')
parser.add_argument(
'-r', '--recursive',
type=strtobool,
default=1,
help='If True, process directory recursively [True]')
parser.add_argument(
'-e', '--elemental',
type=strtobool,
default=0,
help='If True, create also elemental features [False]')
parser.add_argument(
'-a', '--convert-answer',
type=strtobool,
default=1,
help='If True, convert answer [True]')
parser.add_argument(
'-s', '--skip-interim',
type=strtobool,
default=0,
help='If True, skip conversion of interim data [False]')
args = parser.parse_args()
main_setting = siml.setting.MainSetting.read_settings_yaml(
args.settings_yaml)
if not args.convert_answer:
main_setting.conversion.required_file_names = ['*.msh', '*.cnt']
main_setting.data.raw = args.raw_data_directory
if args.out_dir is None:
args.out_dir = args.raw_data_directory
main_setting.data.interim = [siml.prepost.determine_output_directory(
main_setting.data.raw,
main_setting.data.raw.parent / 'interim', 'raw')]
main_setting.data.preprocessed = [
siml.prepost.determine_output_directory(
main_setting.data.raw,
main_setting.data.raw.parent / 'preprocessed', 'raw')]
else:
main_setting.data.interim = [args.out_dir / 'interim']
main_setting.data.preprocessed = [args.out_dir / 'preprocessed']
if not args.skip_interim:
conversion_function = convert_raw_data.HeatConversionFuncionCreator(
create_elemental=args.elemental,
convert_answer=args.convert_answer,
light=args.light)
raw_converter = siml.prepost.RawConverter(
main_setting,
conversion_function=conversion_function,
filter_function=convert_raw_data.filter_function_heat,
force_renew=args.force_renew,
recursive=args.recursive,
to_first_order=True,
write_ucd=False,
read_npy=args.read_npy, read_res=args.convert_answer)
raw_converter.convert()
preprocessor = siml.prepost.Preprocessor(
main_setting, force_renew=args.force_renew,
allow_missing=True)
preprocessor.convert_interim_data(preprocessor_pkl=args.preprocessors_pkl)
return
if __name__ == '__main__':
main()
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ...dyn.util.alias_programmatic_pair import AliasProgrammaticPair as AliasProgrammaticPair
from ...dyn.util.atom_class_request import AtomClassRequest as AtomClassRequest
from ...dyn.util.atom_description import AtomDescription as AtomDescription
from ...dyn.util.bootstrap_macro_expander import BootstrapMacroExpander as BootstrapMacroExpander
from ...dyn.util.cell_protection import CellProtection as CellProtection
from ...dyn.util.changes_event import ChangesEvent as ChangesEvent
from ...dyn.util.changes_set import ChangesSet as ChangesSet
from ...dyn.util.close_veto_exception import CloseVetoException as CloseVetoException
from ...dyn.util.color import Color as Color
from ...dyn.util.data_editor_event import DataEditorEvent as DataEditorEvent
from ...dyn.util.data_editor_event_type import DataEditorEventType as DataEditorEventType
from ...dyn.util.date import Date as Date
from ...dyn.util.date_time import DateTime as DateTime
from ...dyn.util.date_time_range import DateTimeRange as DateTimeRange
from ...dyn.util.date_time_with_timezone import DateTimeWithTimezone as DateTimeWithTimezone
from ...dyn.util.date_with_timezone import DateWithTimezone as DateWithTimezone
from ...dyn.util.duration import Duration as Duration
from ...dyn.util.element_change import ElementChange as ElementChange
from ...dyn.util.endianness import Endianness as Endianness
from ...dyn.util.endianness import EndiannessEnum as EndiannessEnum
from ...dyn.util.invalid_state_exception import InvalidStateException as InvalidStateException
from ...dyn.util.job_manager import JobManager as JobManager
from ...dyn.util.language import Language as Language
from ...dyn.util.macro_expander import MacroExpander as MacroExpander
from ...dyn.util.malformed_number_format_exception import MalformedNumberFormatException as MalformedNumberFormatException
from ...dyn.util.measure_unit import MeasureUnit as MeasureUnit
from ...dyn.util.measure_unit import MeasureUnitEnum as MeasureUnitEnum
from ...dyn.util.mode_change_event import ModeChangeEvent as ModeChangeEvent
from ...dyn.util.not_locked_exception import NotLockedException as NotLockedException
from ...dyn.util.not_numeric_exception import NotNumericException as NotNumericException
from ...dyn.util.number_format import NumberFormat as NumberFormat
from ...dyn.util.number_format import NumberFormatEnum as NumberFormatEnum
from ...dyn.util.number_format_properties import NumberFormatProperties as NumberFormatProperties
from ...dyn.util.number_format_settings import NumberFormatSettings as NumberFormatSettings
from ...dyn.util.number_formats import NumberFormats as NumberFormats
from ...dyn.util.number_formats_supplier import NumberFormatsSupplier as NumberFormatsSupplier
from ...dyn.util.number_formatter import NumberFormatter as NumberFormatter
from ...dyn.util.office_installation_directories import OfficeInstallationDirectories as OfficeInstallationDirectories
from ...dyn.util.path_settings import PathSettings as PathSettings
from ...dyn.util.path_substitution import PathSubstitution as PathSubstitution
from ...dyn.util.replace_descriptor import ReplaceDescriptor as ReplaceDescriptor
from ...dyn.util.revision_tag import RevisionTag as RevisionTag
from ...dyn.util.search_algorithms import SearchAlgorithms as SearchAlgorithms
from ...dyn.util.search_algorithms2 import SearchAlgorithms2 as SearchAlgorithms2
from ...dyn.util.search_algorithms2 import SearchAlgorithms2Enum as SearchAlgorithms2Enum
from ...dyn.util.search_descriptor import SearchDescriptor as SearchDescriptor
from ...dyn.util.search_flags import SearchFlags as SearchFlags
from ...dyn.util.search_flags import SearchFlagsEnum as SearchFlagsEnum
from ...dyn.util.search_options import SearchOptions as SearchOptions
from ...dyn.util.search_options2 import SearchOptions2 as SearchOptions2
from ...dyn.util.search_result import SearchResult as SearchResult
from ...dyn.util.sort_descriptor import SortDescriptor as SortDescriptor
from ...dyn.util.sort_descriptor2 import SortDescriptor2 as SortDescriptor2
from ...dyn.util.sort_field import SortField as SortField
from ...dyn.util.sort_field_type import SortFieldType as SortFieldType
from ...dyn.util.sortable import Sortable as Sortable
from ...dyn.util.text_search import TextSearch as TextSearch
from ...dyn.util.text_search2 import TextSearch2 as TextSearch2
from ...dyn.util.time import Time as Time
from ...dyn.util.time_with_timezone import TimeWithTimezone as TimeWithTimezone
from ...dyn.util.tri_state import TriState as TriState
from ...dyn.util.url import URL as URL
from ...dyn.util.url_transformer import URLTransformer as URLTransformer
from ...dyn.util.uri_abbreviation import UriAbbreviation as UriAbbreviation
from ...dyn.util.veto_exception import VetoException as VetoException
from ...dyn.util.x_accounting import XAccounting as XAccounting
from ...dyn.util.x_atom_server import XAtomServer as XAtomServer
from ...dyn.util.x_binary_data_container import XBinaryDataContainer as XBinaryDataContainer
from ...dyn.util.x_broadcaster import XBroadcaster as XBroadcaster
from ...dyn.util.x_cancellable import XCancellable as XCancellable
from ...dyn.util.x_chainable import XChainable as XChainable
from ...dyn.util.x_changes_batch import XChangesBatch as XChangesBatch
from ...dyn.util.x_changes_listener import XChangesListener as XChangesListener
from ...dyn.util.x_changes_notifier import XChangesNotifier as XChangesNotifier
from ...dyn.util.x_changes_set import XChangesSet as XChangesSet
from ...dyn.util.x_cloneable import XCloneable as XCloneable
from ...dyn.util.x_close_broadcaster import XCloseBroadcaster as XCloseBroadcaster
from ...dyn.util.x_close_listener import XCloseListener as XCloseListener
from ...dyn.util.x_closeable import XCloseable as XCloseable
from ...dyn.util.x_data_editor import XDataEditor as XDataEditor
from ...dyn.util.x_data_editor_listener import XDataEditorListener as XDataEditorListener
from ...dyn.util.x_flush_listener import XFlushListener as XFlushListener
from ...dyn.util.x_flushable import XFlushable as XFlushable
from ...dyn.util.x_importable import XImportable as XImportable
from ...dyn.util.x_indent import XIndent as XIndent
from ...dyn.util.x_job_manager import XJobManager as XJobManager
from ...dyn.util.x_link_update import XLinkUpdate as XLinkUpdate
from ...dyn.util.x_localized_aliases import XLocalizedAliases as XLocalizedAliases
from ...dyn.util.x_lockable import XLockable as XLockable
from ...dyn.util.x_macro_expander import XMacroExpander as XMacroExpander
from ...dyn.util.x_mergeable import XMergeable as XMergeable
from ...dyn.util.x_mode_change_approve_listener import XModeChangeApproveListener as XModeChangeApproveListener
from ...dyn.util.x_mode_change_broadcaster import XModeChangeBroadcaster as XModeChangeBroadcaster
from ...dyn.util.x_mode_change_listener import XModeChangeListener as XModeChangeListener
from ...dyn.util.x_mode_selector import XModeSelector as XModeSelector
from ...dyn.util.x_modifiable import XModifiable as XModifiable
from ...dyn.util.x_modifiable2 import XModifiable2 as XModifiable2
from ...dyn.util.x_modify_broadcaster import XModifyBroadcaster as XModifyBroadcaster
from ...dyn.util.x_modify_listener import XModifyListener as XModifyListener
from ...dyn.util.x_number_format_previewer import XNumberFormatPreviewer as XNumberFormatPreviewer
from ...dyn.util.x_number_format_types import XNumberFormatTypes as XNumberFormatTypes
from ...dyn.util.x_number_formats import XNumberFormats as XNumberFormats
from ...dyn.util.x_number_formats_supplier import XNumberFormatsSupplier as XNumberFormatsSupplier
from ...dyn.util.x_number_formatter import XNumberFormatter as XNumberFormatter
from ...dyn.util.x_number_formatter2 import XNumberFormatter2 as XNumberFormatter2
from ...dyn.util.x_office_installation_directories import XOfficeInstallationDirectories as XOfficeInstallationDirectories
from ...dyn.util.x_path_settings import XPathSettings as XPathSettings
from ...dyn.util.x_property_replace import XPropertyReplace as XPropertyReplace
from ...dyn.util.x_protectable import XProtectable as XProtectable
from ...dyn.util.x_refresh_listener import XRefreshListener as XRefreshListener
from ...dyn.util.x_refreshable import XRefreshable as XRefreshable
from ...dyn.util.x_replace_descriptor import XReplaceDescriptor as XReplaceDescriptor
from ...dyn.util.x_replaceable import XReplaceable as XReplaceable
from ...dyn.util.x_search_descriptor import XSearchDescriptor as XSearchDescriptor
from ...dyn.util.x_searchable import XSearchable as XSearchable
from ...dyn.util.x_sortable import XSortable as XSortable
from ...dyn.util.x_string_abbreviation import XStringAbbreviation as XStringAbbreviation
from ...dyn.util.x_string_escape import XStringEscape as XStringEscape
from ...dyn.util.x_string_mapping import XStringMapping as XStringMapping
from ...dyn.util.x_string_substitution import XStringSubstitution as XStringSubstitution
from ...dyn.util.x_string_width import XStringWidth as XStringWidth
from ...dyn.util.x_text_search import XTextSearch as XTextSearch
from ...dyn.util.x_text_search2 import XTextSearch2 as XTextSearch2
from ...dyn.util.x_time_stamped import XTimeStamped as XTimeStamped
from ...dyn.util.xurl_transformer import XURLTransformer as XURLTransformer
from ...dyn.util.x_unique_id_factory import XUniqueIDFactory as XUniqueIDFactory
from ...dyn.util.x_updatable import XUpdatable as XUpdatable
from ...dyn.util.x_updatable2 import XUpdatable2 as XUpdatable2
from ...dyn.util.x_veto import XVeto as XVeto
from ...dyn.util.the_macro_expander import theMacroExpander as theMacroExpander
from ...dyn.util.the_office_installation_directories import theOfficeInstallationDirectories as theOfficeInstallationDirectories
from ...dyn.util.the_path_settings import thePathSettings as thePathSettings
|
# Generated by Django 3.1.7 on 2021-03-28 19:57
import angalabiri.causes.models
from django.db import migrations
import django_resized.forms
class Migration(migrations.Migration):
dependencies = [
('causes', '0003_auto_20210324_0236'),
]
operations = [
migrations.AlterField(
model_name='cause',
name='image',
field=django_resized.forms.ResizedImageField(blank=True, crop=['middle', 'center'], force_format='JPEG', keep_meta=True, null=True, quality=75, size=[1920, 1148], upload_to=angalabiri.causes.models.cause_file_path, verbose_name='Upload Cause Image'),
),
]
|
def nswp(n):
    # Recurrence b_k = 2*b_{k-1} + b_{k-2} with b_0 = b_1 = 1.
    # First values: 1, 1, 3, 7, 17, 41, ... (half-companion Pell numbers;
    # every second term is an NSW number, which the name likely refers to).
    if n < 2:
        return 1
    a, b = 1, 1
    for i in range(2, n + 1):
        c = 2 * b + a
        a = b
        b = c
    return b

n = 3
print(nswp(n))  # -> 7
|
"""
1.Question 1
In this programming problem you'll code up Dijkstra's shortest-path algorithm.
The file (dijkstraData.txt) contains an adjacency list representation of an undirected weighted graph with 200 vertices labeled 1 to 200. Each row consists of the node tuples that are adjacent to that particular vertex along with the length of that edge. For example, the 6th row has 6 as the first entry indicating that this row corresponds to the vertex labeled 6. The next entry of this row "141,8200" indicates that there is an edge between vertex 6 and vertex 141 that has length 8200. The rest of the pairs of this row indicate the other vertices adjacent to vertex 6 and the lengths of the corresponding edges.
Your task is to run Dijkstra's shortest-path algorithm on this graph, using 1 (the first vertex) as the source vertex, and to compute the shortest-path distances between 1 and every other vertex of the graph. If there is no path between a vertex v and vertex 1, we'll define the shortest-path distance between 1 and v to be 1000000.
You should report the shortest-path distances to the following ten vertices, in order: 7,37,59,82,99,115,133,165,188,197. You should encode the distances as a comma-separated string of integers. So if you find that all ten of these vertices except 115 are at distance 1000 away from vertex 1 and 115 is 2000 distance away, then your answer should be 1000,1000,1000,1000,1000,2000,1000,1000,1000,1000. Remember the order of reporting DOES MATTER, and the string should be in the same order in which the above ten vertices are given. The string should not contain any spaces. Please type your answer in the space provided.
IMPLEMENTATION NOTES: This graph is small enough that the straightforward O(mn) time implementation of Dijkstra's algorithm should work fine. OPTIONAL: For those of you seeking an additional challenge, try implementing the heap-based version. Note this requires a heap that supports deletions, and you'll probably need to maintain some kind of mapping between vertices and their positions in the heap.
"""
from MinHeapForDijkstra import *
########################################################
# Reading the data, store the number of nodes
file = open("data/dijkstraData.txt", "r")
data = file.readlines()
num_nodes = len(data)
########################################################
# Class definitions
class Node(object):
next_node = {} # use a dictionary to store the information of the next nodes (index and distance)
def __init__(self):
self.next_node = {}
def add_next_distance(self, next_node_index, next_node_distance):
self.next_node[next_node_index] = next_node_distance
class Graph(object):
num_nodes = 0 # the number of total nodes
graph = [Node() for i in range(num_nodes + 1)] # list of Node objects, index of graph represents index of nodes
# store the information of shortest path distances (from source), index represents the index of nodes
shortest_path_distance = [None for i in range(num_nodes + 1)]
def __init__(self, value):
self.num_nodes = value
self.graph = [Node() for i in range(value + 1)] # we don't use index 0
self.shortest_path_distance = [None for i in range(value + 1)]
def ReadingData(self, data):
"""
Reading data into the Graph, store in self.graph
Args:
data: input data of graph information (stored in list for every node)
"""
for line in data:
items = line.split()
for i in range(1, len(items)):
next_node_index = int(items[i].split(',')[0])
next_node_distance = int(items[i].split(',')[1])
self.graph[int(items[0])].add_next_distance(next_node_index, next_node_distance)
def Dijkstra(self, source: int):
"""
Use Dijkstra Algorithm to find the shortest path distance for every node from source
Args:
source: the source index
"""
self.shortest_path_distance[source] = 0
visited = [False] * (self.num_nodes + 1)
visited[0] = True # we don't use index 0
visited[source] = True
# initialize the heap for storing the minimum of Dijkstra values
heap = MinHeapForDijkstra()
# last_node represents the last explored node
last_node = source
while visited != [True for i in range(self.num_nodes + 1)]:
# UPDATE HEAP PROCEDURE
# loop over the head vertices of last_node in the unexplored area
for w in self.graph[last_node].next_node.keys():
if not visited[w]:
# calculate the Dijkstra value
dijkstra_value = self.shortest_path_distance[last_node] + self.graph[last_node].next_node[w]
# if w not in the index list of the heap, add it
if w not in heap.index:
heap.push(w, dijkstra_value)
# if w in the index list of the heap, compare with the old value
else:
# find the heap index of w in the heap, in order to retrieve the corresponding value in the heap
heap_index = None
for i in range(len(heap.heap)):
if heap.index[i] == w:
heap_index = i
break
# if the new Dijkstra value is smaller, update it; otherwise do nothing
if dijkstra_value < heap.heap[heap_index]:
heap.delete(heap_index)
heap.push(w, dijkstra_value)
# pop the heap and add the next node into the explored area, update shortest_path_distance
try:
next_node, next_node_distance = heap.pop()
self.shortest_path_distance[next_node] = next_node_distance
visited[next_node] = True
last_node = next_node
# if the heap is empty, means the search is finished, mark the rest vertices as 1000000
except IndexError:
for i in range(1, self.num_nodes+1):
if not visited[i]:
visited[i] = True
self.shortest_path_distance[i] = 1000000
########################################################
# Using Dijkstra Algorithm to calculate the shortest path distance from source 1
g = Graph(num_nodes)
g.ReadingData(data)
g.Dijkstra(1)
answer = g.shortest_path_distance
print(answer[1:31])
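# The assignment text above asks for the distances to ten specific vertices as a
# comma-separated string; this sketch derives it from the `answer` list computed
# above (vertex labels start at 1).
target_vertices = [7, 37, 59, 82, 99, 115, 133, 165, 188, 197]
print(",".join(str(answer[v]) for v in target_vertices))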
|
from FundCompanyListCrawler import *
import pandas as pd
# Fund company analysis data class
class FundCompanyAnalysis:
def __init__(self):
return
    # Get the 10 longest-established fund companies
@staticmethod
def getMaxBuildFundCompanyList():
fundCompanyInfoList=FundCompanyListCrawler.getFundCompanyDataList()
df=pd.DataFrame(fundCompanyInfoList)
print(df.info())
        # Select the fund company name and founding date columns
df_data=df[['companyName','createdDate']]
print(df_data)
df_data_sort=df_data.sort_values('createdDate',ascending=True)
print(df_data_sort)
        # Take the top 10 fund companies
top10data=df_data_sort.head(10)
print(top10data)
top10data.to_csv('fundCompanyBuildOldList.csv',index=False)
pass
@staticmethod
def getMaxMarketValueList():
fundCompanyInfoList=FundCompanyListCrawler.getFundCompanyDataList()
#print(fundCompanyInfoList)
df=pd.DataFrame(fundCompanyInfoList)
print(df.info())
        # Select the fund company name and management scale columns
df_data=df[['companyName','managementScale']]
print(df_data)
        # Convert the managementScale strings to numbers
managementScaleList=df['managementScale'].tolist()
newManagementScaleList=[]
print(managementScaleList)
for item in managementScaleList:
if item=='---':
item='0.0亿元'
            # Strip the '亿元' (hundred-million yuan) suffix
item=item.replace('亿元','')
            # Convert the string to a float
item=float(item)
newManagementScaleList.append(item)
        # Write the converted values back as the managementScale column
df_data['managementScale']=newManagementScaleList
        # Sort by management scale in descending order
df_data_sort=df_data.sort_values('managementScale',ascending=False)
print(df_data_sort)
        # Take the top 10 rows
top10data=df_data_sort.head(10)
        # Re-sort in ascending order
top10data_asc=top10data.sort_values('managementScale',ascending=True)
        # Export the two columns as lists
companyNameList=top10data_asc['companyName'].values.tolist()
managementScaleList=top10data_asc['managementScale'].values.tolist()
print(companyNameList)
print(managementScaleList)
seris=df['fundNo_number']=df['fundNo']
seris.map(lambda x:100)
#print(type(df['fundNo_number']))
#print(df.info())
        # Sort
sortDf=df.sort_values('fundNo_number',ascending=False)
#print(sortDf.head(10)['fundNo_number'])
#print(df)
pass
|
# Echo client program
import socket
HOST = '192.168.2.3' # The remote host
PORT = 8001 # The same port as used by the server
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
s.sendall(b'Hello, Jack')
data = s.recv(1024)
print('Received', repr(data))
|
#!/usr/bin/env python
from tenable.io import TenableIO
from csv import DictWriter
import collections.abc, click, logging
def flatten(d, parent_key='', sep='.'):
'''
Flattens a nested dict. Shamelessly ripped from
`this <https://stackoverflow.com/a/6027615>`_ Stackoverflow answer.
'''
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
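# Example of what flatten() produces (illustrative input, traced from the code above):
#   flatten({'plugin': {'id': 12345, 'cve': ['CVE-0000-0000']}, 'severity': 'info'})
#   -> {'plugin.id': 12345, 'plugin.cve': ['CVE-0000-0000'], 'severity': 'info'}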
def export_vulns_to_csv(fobj, vulns, *fields):
'''
Generates a CSV file from the fields specified and will pass the keywords
on to the vuln export.
    Args:
        fobj (file): The file object of the CSV file to write to.
        vulns (iterable): The iterable of vulnerability dictionaries to export
            (e.g. the result of ``tio.exports.vulns()``).
        *fields (list): A listing of fields to export.
    Returns:
        int: The number of vulnerabilities written to the file.
Examples:
        Basic Export:
            >>> vulns = tio.exports.vulns()
            >>> with open('example.csv', 'w') as report:
            ...     export_vulns_to_csv(report, vulns)
Choosing the Fields to Export for high and critical vulns:
>>> fields = ['plugin.id', 'plugin.name', 'asset.uuid']
>>> vulns = tio.exports.vulns()
>>> with open('example.csv', 'w') as report:
... export_vulns_to_csv(report, vulns, *fields)
'''
if not fields:
fields = [
'asset.fqdn',
'asset.hostname',
'asset.operating_system',
'asset.uuid',
'first_found',
'last_found',
'plugin.id',
'plugin.name',
'plugin.cve',
'plugin.cvss_base_score',
            'plugin.cvss_temporal_score',
'port.port',
'port.protocol',
'severity',
'state'
]
# Instantiate the dictionary writer, pass it the fields that we would like
# to have recorded to the file, and inform the writer that we want it to
# ignore the rest of the fields that may be passed to it.
writer = DictWriter(fobj, fields, extrasaction='ignore')
writer.writeheader()
counter = 0
for vuln in vulns:
counter += 1
# We need the vulnerability dictionary flattened out and all of the
# lists converted into a pipe-delimited string.
flat = flatten(vuln)
for k, v in flat.items():
if isinstance(v, list):
flat[k] = '|'.join([str(i) for i in v])
# Write the vulnerability to the CSV File.
writer.writerow(flat)
return counter
@click.command()
@click.argument('output', type=click.File('w'))
@click.option('--tio-access-key', 'akey', help='Tenable.io API Access Key')
@click.option('--tio-secret-key', 'skey', help='Tenable.io API Secret Key')
@click.option('--severity', 'sevs', multiple=True, help='Vulnerability Severity')
@click.option('--last-found', type=click.INT,
help='Vulnerability Last Found Timestamp')
@click.option('--cidr', help='Restrict export to this CIDR range')
@click.option('--tag', 'tags', multiple=True, nargs=2, type=(str, str),
help='Tag Key/Value pair to restrict the export to.')
@click.option('--field', '-f', 'fields', multiple=True,
help='Field to export to CSV')
@click.option('--verbose', '-v', envvar='VERBOSITY', default=0,
count=True, help='Logging Verbosity')
def cli(output, akey, skey, sevs, last_found, cidr, tags, fields, verbose):
'''
Export -> CSV Writer
Generates a CSV File from the vulnerability export using the fields
specified.
'''
# Setup the logging verbosity.
if verbose == 0:
logging.basicConfig(level=logging.WARNING)
if verbose == 1:
logging.basicConfig(level=logging.INFO)
if verbose > 1:
logging.basicConfig(level=logging.DEBUG)
# Instantiate the Tenable.io instance & initiate the vulnerability export.
tio = TenableIO(akey, skey)
vulns = tio.exports.vulns(last_found=last_found, severity=list(sevs),
cidr_range=cidr, tags=list(tags))
# Pass everything to the CSV generator.
total = export_vulns_to_csv(output, vulns, *fields)
click.echo('Processed {} Vulnerabilities'.format(total))
if __name__ == '__main__':
cli()
|
import torch.optim as optim
from torchvision.utils import save_image
from _datetime import datetime
from libs.compute import *
from libs.constant import *
from libs.model import *
if __name__ == "__main__":
start_time = datetime.now()
learning_rate = LEARNING_RATE
# Creating generator and discriminator
generator = Generator()
generator = nn.DataParallel(generator)
if torch.cuda.is_available():
generator.cuda(device=device)
# Loading Training and Test Set Data
trainLoader1, trainLoader2, trainLoader_cross, testLoader = data_loader()
### MSE Loss and Optimizer
criterion = nn.MSELoss()
optimizer_g = optim.Adam(generator.parameters(), lr=LEARNING_RATE, betas=(BETA1, BETA2))
### GENERATOR PRE-TRAINING LOOP
print("Pre-training loop starting")
batches_done = 0
running_loss = 0.0
running_losslist = []
for epoch in range(NUM_EPOCHS_PRETRAIN):
for param_group in optimizer_g.param_groups:
param_group['lr'] = adjustLearningRate(learning_rate, epoch_num=epoch, decay_rate=DECAY_RATE)
for i, (target, input) in enumerate(trainLoader1, 0):
unenhanced_image = input[0]
enhanced_image = target[0]
unenhanced = Variable(unenhanced_image.type(Tensor_gpu))
enhanced = Variable(enhanced_image.type(Tensor_gpu))
optimizer_g.zero_grad()
generated_enhanced_image = generator(enhanced)
loss = criterion(unenhanced, generated_enhanced_image)
loss.backward(retain_graph=True)
optimizer_g.step()
# Print statistics
running_loss += loss.item()
running_losslist.append(loss.item())
f = open("./models/log/log_PreTraining.txt", "a+")
            f.write("[Epoch %d/%d] [Batch %d/%d] [G loss: %f]\n" % (
                epoch + 1, NUM_EPOCHS_PRETRAIN, i + 1, len(trainLoader1), loss.item()))
f.close()
# if i % 200 == 200: # print every 200 mini-batches
if i % 1 == 0:
print('[%d, %5d] loss: %.5f' % (epoch + 1, i + 1, running_loss / 5))
running_loss = 0.0
save_image(generated_enhanced_image.data,
"./models/pretrain_images/1Way/gan1_pretrain_%d_%d.png" % (epoch + 1, i + 1),
nrow=8,
normalize=True)
torch.save(generator.state_dict(),
'./models/pretrain_checkpoint/1Way/gan1_pretrain_' + str(epoch + 1) + '_' + str(i + 1) + '.pth')
end_time = datetime.now()
print(end_time-start_time)
f = open("./models/log/log_PreTraining_LossList.txt", "a+")
for item in running_losslist:
f.write('%f\n' % item)
f.close()
|
"""
RateCoefficients.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Wed Dec 26 20:59:24 2012
Description: Rate coefficients for hydrogen and helium. Currently using
Fukugita & Kawasaki (1994). Would be nice to include rates from other sources.
"""
import numpy as np
from scipy.misc import derivative
from ..util.Math import interp1d
from ..util.Math import central_difference
T = None
rate_sources = ['fk94']
class RateCoefficients(object):
def __init__(self, grid=None, rate_src='fk94', T=T, recombination='B',
interp_rc='linear'):
"""
Parameters
----------
grid : rt1d.static.Grid instance
source : str
fk94 (Fukugita & Kawasaki 1994)
chianti
"""
self.grid = grid
self.rate_src = rate_src
self.interp_rc = interp_rc
self.T = T
self.rec = recombination
self.Tarr = 10**np.arange(-1, 6.1, 0.1)
if rate_src not in rate_sources:
raise ValueError(('Unrecognized rate coefficient source ' +\
'\'{!s}\'').format(rate_src))
def CollisionalIonizationRate(self, species, T):
"""
Collisional ionization rate which we denote elsewhere as Beta.
"""
if self.rate_src == 'fk94':
if species == 0:
return 5.85e-11 * np.sqrt(T) * (1. + np.sqrt(T / 1e5))**-1. \
* np.exp(-1.578e5 / T)
if species == 1:
return 2.38e-11 * np.sqrt(T) * (1. + np.sqrt(T / 1e5))**-1. \
* np.exp(-2.853e5 / T)
if species == 2:
return 5.68e-12 * np.sqrt(T) * (1. + np.sqrt(T / 1e5))**-1. \
* np.exp(-6.315e5 / T)
else:
name = self.grid.neutrals[species]
return self.neutrals[name]['ionizRate'](T)
@property
def _dCollisionalIonizationRate(self):
if not hasattr(self, '_dCollisionalIonizationRate_'):
self._dCollisionalIonizationRate_ = {}
for i, absorber in enumerate(self.grid.absorbers):
tmp = derivative(lambda T: self.CollisionalIonizationRate(i, T), self.Tarr)
self._dCollisionalIonizationRate_[i] = interp1d(self.Tarr, tmp,
kind=self.interp_rc)
return self._dCollisionalIonizationRate_
def dCollisionalIonizationRate(self, species, T):
if self.rate_src == 'fk94':
return self._dCollisionalIonizationRate[species](T)
#return derivative(lambda T: self.CollisionalIonizationRate(species, T), T)
else:
name = self.grid.neutrals[species]
return self.neutrals[name]['dionizRate']
def RadiativeRecombinationRate(self, species, T):
"""
Coefficient for radiative recombination. Here, species = 0, 1, 2
refers to HII, HeII, and HeIII.
"""
if self.rec == 0:
return np.zeros_like(T)
if self.rate_src == 'fk94':
if self.rec == 'A':
if species == 0:
return 6.28e-11 * T**-0.5 * (T / 1e3)**-0.2 * (1. + (T / 1e6)**0.7)**-1.
elif species == 1:
return 1.5e-10 * T**-0.6353
elif species == 2:
return 3.36e-10 * T**-0.5 * (T / 1e3)**-0.2 * (1. + (T / 4e6)**0.7)**-1.
elif self.rec == 'B':
if species == 0:
return 2.6e-13 * (T / 1.e4)**-0.85
elif species == 1:
return 9.94e-11 * T**-0.6687
elif species == 2:
alpha = 3.36e-10 * T**-0.5 * (T / 1e3)**-0.2 * (1. + (T / 4.e6)**0.7)**-1 # To n >= 1
if type(T) in [float, np.float64]:
if T < 2.2e4:
alpha *= (1.11 - 0.044 * np.log(T)) # To n >= 2
else:
alpha *= (1.43 - 0.076 * np.log(T)) # To n >= 2
else:
alpha[T < 2.2e4] *= (1.11 - 0.044 * np.log(T[T < 2.2e4])) # To n >= 2
alpha[T >= 2.2e4] *= (1.43 - 0.076 * np.log(T[T >= 2.2e4])) # To n >= 2
return alpha
else:
raise ValueError('Unrecognized RecombinationMethod. Should be A or B.')
else:
name = self.grid.ions[species]
return self.ions[name]['recombRate'](T)
@property
def _dRadiativeRecombinationRate(self):
if not hasattr(self, '_dRadiativeRecombinationRate_'):
self._dRadiativeRecombinationRate_ = {}
for i, absorber in enumerate(self.grid.absorbers):
tmp = derivative(lambda T: self.RadiativeRecombinationRate(i, T), self.Tarr)
self._dRadiativeRecombinationRate_[i] = interp1d(self.Tarr, tmp,
kind=self.interp_rc)
return self._dRadiativeRecombinationRate_
def dRadiativeRecombinationRate(self, species, T):
if self.rate_src == 'fk94':
return self._dRadiativeRecombinationRate[species](T)
#return derivative(lambda T: self.RadiativeRecombinationRate(species, T), T)
else:
name = self.ions.neutrals[species]
return self.ions[name]['drecombRate']
def DielectricRecombinationRate(self, T):
"""
Dielectric recombination coefficient for helium.
"""
if self.rate_src == 'fk94':
return 1.9e-3 * T**-1.5 * np.exp(-4.7e5 / T) * (1. + 0.3 * np.exp(-9.4e4 / T))
else:
raise NotImplementedError()
@property
def _dDielectricRecombinationRate(self):
if not hasattr(self, '_dDielectricRecombinationRate_'):
self._dDielectricRecombinationRate_ = {}
tmp = derivative(lambda T: self.DielectricRecombinationRate(T), self.Tarr)
self._dDielectricRecombinationRate_ = interp1d(self.Tarr, tmp,
kind=self.interp_rc)
return self._dDielectricRecombinationRate_
def dDielectricRecombinationRate(self, T):
if self.rate_src == 'fk94':
return self._dDielectricRecombinationRate(T)
#return derivative(self.DielectricRecombinationRate, T)
else:
raise NotImplementedError()
def CollisionalIonizationCoolingRate(self, species, T):
"""
Returns coefficient for cooling by collisional ionization. These are equations B4.1a, b, and d respectively
from FK96.
units: erg cm^3 / s
"""
if self.rate_src == 'fk94':
if species == 0:
return 1.27e-21 * np.sqrt(T) * (1. + np.sqrt(T / 1e5))**-1. * np.exp(-1.58e5 / T)
if species == 1:
return 9.38e-22 * np.sqrt(T) * (1. + np.sqrt(T / 1e5))**-1. * np.exp(-2.85e5 / T)
if species == 2:
return 4.95e-22 * np.sqrt(T) * (1. + np.sqrt(T / 1e5))**-1. * np.exp(-6.31e5 / T)
else:
            raise NotImplementedError('Cannot do cooling for rate_source != fk94 (yet).')
@property
def _dCollisionalIonizationCoolingRate(self):
if not hasattr(self, '_dCollisionalIonizationCoolingRate_'):
self._dCollisionalIonizationCoolingRate_ = {}
for i, absorber in enumerate(self.grid.absorbers):
                tmp = derivative(lambda T: self.CollisionalIonizationCoolingRate(i, T), self.Tarr)
self._dCollisionalIonizationCoolingRate_[i] = interp1d(self.Tarr, tmp,
kind=self.interp_rc)
return self._dCollisionalIonizationCoolingRate_
def dCollisionalIonizationCoolingRate(self, species, T):
if self.rate_src == 'fk94':
return self._dCollisionalIonizationCoolingRate[species](T)
#return derivative(lambda T: self.CollisionalIonizationCoolingRate(species, T), T)
else:
raise NotImplementedError()
def CollisionalExcitationCoolingRate(self, species, T):
"""
Returns coefficient for cooling by collisional excitation. These are equations B4.3a, b, and c respectively
from FK96.
units: erg cm^3 / s
"""
if self.rate_src == 'fk94':
if species == 0:
return 7.5e-19 * (1. + np.sqrt(T / 1e5))**-1. * np.exp(-1.18e5 / T)
if species == 1:
return 9.1e-27 * T**-0.1687 * (1. + np.sqrt(T / 1e5))**-1. * np.exp(-1.31e4 / T) # CONFUSION
if species == 2:
return 5.54e-17 * T**-0.397 * (1. + np.sqrt(T / 1e5))**-1. * np.exp(-4.73e5 / T)
else:
            raise NotImplementedError('Cannot do cooling for rate_source != fk94 (yet).')
@property
def _dCollisionalExcitationCoolingRate(self):
if not hasattr(self, '_dCollisionalExcitationCoolingRate_'):
self._dCollisionalExcitationCoolingRate_ = {}
for i, absorber in enumerate(self.grid.absorbers):
tmp = derivative(lambda T: self.CollisionalExcitationCoolingRate(i, T), self.Tarr)
self._dCollisionalExcitationCoolingRate_[i] = interp1d(self.Tarr, tmp,
kind=self.interp_rc)
return self._dCollisionalExcitationCoolingRate_
def dCollisionalExcitationCoolingRate(self, species, T):
if self.rate_src == 'fk94':
return self._dCollisionalExcitationCoolingRate[species](T)
#return derivative(lambda T: self.CollisionalExcitationCoolingRate(species, T), T)
else:
raise NotImplementedError()
def RecombinationCoolingRate(self, species, T):
"""
Returns coefficient for cooling by recombination. These are equations B4.2a, b, and d respectively
from FK96.
units: erg cm^3 / s
"""
if self.rec == 0:
return np.zeros_like(T)
if self.rate_src == 'fk94':
if species == 0:
return 6.5e-27 * np.sqrt(T) * (T / 1e3)**-0.2 * (1.0 + (T / 1e6)**0.7)**-1.0
if species == 1:
return 1.55e-26 * T**0.3647
if species == 2:
return 3.48e-26 * np.sqrt(T) * (T / 1e3)**-0.2 * (1. + (T / 4e6)**0.7)**-1.
else:
            raise NotImplementedError('Cannot do cooling for rate_source != fk94 (yet).')
@property
def _dRecombinationCoolingRate(self):
if not hasattr(self, '_dRecombinationCoolingRate_'):
self._dRecombinationCoolingRate_ = {}
for i, absorber in enumerate(self.grid.absorbers):
tmp = derivative(lambda T: self.RecombinationCoolingRate(i, T), self.Tarr)
self._dRecombinationCoolingRate_[i] = interp1d(self.Tarr, tmp,
kind=self.interp_rc)
return self._dRecombinationCoolingRate_
def dRecombinationCoolingRate(self, species, T):
if self.rate_src == 'fk94':
return self._dRecombinationCoolingRate[species](T)
#return derivative(lambda T: self.RecombinationCoolingRate(species, T), T)
else:
            raise NotImplementedError('Cannot do cooling for rate_source != fk94 (yet).')
def DielectricRecombinationCoolingRate(self, T):
"""
Returns coefficient for cooling by dielectric recombination. This is equation B4.2c from FK96.
units: erg cm^3 / s
"""
if self.rate_src == 'fk94':
return 1.24e-13 * T**-1.5 * np.exp(-4.7e5 / T) * (1. + 0.3 * np.exp(-9.4e4 / T))
else:
raise NotImplementedError()
@property
def _dDielectricRecombinationCoolingRate(self):
if not hasattr(self, '_dDielectricRecombinationCoolingRate_'):
tmp = derivative(lambda T: self.DielectricRecombinationCoolingRate(T), self.Tarr)
self._dDielectricRecombinationCoolingRate_ = interp1d(self.Tarr, tmp,
kind=self.interp_rc)
return self._dDielectricRecombinationCoolingRate_
def dDielectricRecombinationCoolingRate(self, T):
if self.rate_src == 'fk94':
return self._dDielectricRecombinationCoolingRate(T)
#return derivative(self.DielectricRecombinationCoolingRate, T)
else:
raise NotImplementedError()
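# Minimal usage sketch (added for illustration, not part of the original module).
# It assumes the fk94 analytic rates, which need no grid instance, and that the
# module is imported as part of its package (the relative imports above prevent
# running this file directly as a script). Temperatures are in Kelvin.
if __name__ == '__main__':
    rc = RateCoefficients(rate_src='fk94', recombination='B')
    T = np.logspace(3., 6., 4)
    print(rc.CollisionalIonizationRate(0, T))    # HI collisional ionization (Beta)
    print(rc.RadiativeRecombinationRate(0, T))   # HII case-B recombination (alpha_B)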
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021. All rights reserved.
Created by C. L. Wang on 19.10.21
"""
import os
import sys
from multiprocessing.pool import Pool
p = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if p not in sys.path:
sys.path.append(p)
from myutils.make_html_page import make_html_page
from myutils.project_utils import *
from x_utils.vpf_sevices import get_vpf_service
from root_dir import DATA_DIR
class ModelEvaluator(object):
"""
评估模型
"""
def __init__(self):
pass
@staticmethod
def prepare_dataset():
folder_path = os.path.join(DATA_DIR, "datasets", "text_line_dataset_c4_20211013_files")
file1 = os.path.join(folder_path, "train_000.txt")
file2 = os.path.join(folder_path, "train_001.txt")
file3 = os.path.join(folder_path, "train_002.txt")
file4 = os.path.join(folder_path, "train_003.txt")
file_list = [file1, file2, file3, file4]
val_file = os.path.join(DATA_DIR, "files_text_line_v2", "val_file_4k_v2.txt")
num = 1000
random.seed(47)
for file_idx, file in enumerate(file_list):
data_lines = read_file(file)
random.shuffle(data_lines)
data_lines = data_lines[:num]
out_lines = ["{}\t{}".format(data_line, file_idx) for data_line in data_lines]
write_list_to_file(val_file, out_lines)
        print('[Info] Done: {}, number of samples: {}'.format(val_file, len(read_file(val_file))))
@staticmethod
def predict_img_url(img_url):
"""
预测图像url
"""
# res_dict = get_vpf_service(img_url, service_name="LvQAecdZrrxkLFs6QiUXsF")
res_dict = get_vpf_service(img_url, service_name="ZZHxnYZnkarNPM3RvP4QZP")
# res_dict = get_vpf_service(img_url, service_name="cZiTBF3DaBkCkQFzoHLCHA")
p_list = res_dict["data"]["prob_list"]
return p_list
@staticmethod
def process_line(data_idx, data_line, out_file):
url, label = data_line.split("\t")
p_list = ModelEvaluator.predict_img_url(url)
p_str_list = [str(round(pr, 4)) for pr in p_list]
write_line(out_file, "\t".join([url, label] + p_str_list))
if data_idx % 10 == 0:
print('[Info] data_idx: {}'.format(data_idx))
@staticmethod
def predict_dataset():
val_file = os.path.join(DATA_DIR, "files_text_line_v2", "val_file_4k_v2.txt")
out_file = os.path.join(DATA_DIR, "files_text_line_v2", "val_file_4k_v2_out_batch.{}.txt".format(get_current_time_str()))
        print('[Info] Evaluation file: {}'.format(val_file))
data_lines = read_file(val_file)
# data_lines = data_lines[:1000]
pool = Pool(processes=100)
for data_idx, data_line in enumerate(data_lines):
# ModelEvaluator.process_line(data_idx, data_line, out_file)
pool.apply_async(ModelEvaluator.process_line, (data_idx, data_line, out_file))
pool.close()
pool.join()
        print('[Info] Done: {}, number of samples: {}'.format(out_file, len(read_file(out_file))))
@staticmethod
def get_results_data(in_file):
        print('[Info] Test results file: {}'.format(in_file))
data_lines = read_file(in_file)
items_list = []
for data_line in data_lines:
items = data_line.split("\t")
items_list.append(items)
        print('[Info] Number of test samples: {}'.format(len(items_list)))
return items_list
@staticmethod
def confusion_matrix(items_list, label_str_list):
print('[Info] ' + "-" * 100)
print('[Info] 混淆矩阵')
label_dict = collections.defaultdict(list)
for items in items_list:
url = items[0]
gt = int(items[1])
prob_list = [float(i) for i in items[2:]]
pl = np.argmax(prob_list)
label_dict[gt].append(pl)
for label in label_dict.keys():
num_dict = list_2_numdict(label_dict[label])
num_dict = sort_dict_by_key(num_dict)
print('[Info] {}'.format(label_str_list[label]))
print(["{}:{}%".format(label_str_list[items[0]], items[1]/10) for items in num_dict])
label_dict = dict()
for items in items_list:
url = items[0]
gt = int(items[1])
prob_list = [float(i) for i in items[2:]]
pl = int(np.argmax(prob_list))
if pl not in label_dict.keys():
label_dict[pl] = collections.defaultdict(list)
            label_dict[pl][gt].append([url, prob_list[gt], prob_list[pl]])  # keyed by predicted label
out_dir = os.path.join(DATA_DIR, "results_text_line_v2")
mkdir_if_not_exist(out_dir)
        for pl_ in label_dict.keys():  # predicted label
gt_dict = label_dict[pl_]
gt_list_dict = dict()
for gt_ in gt_dict.keys():
url_prob_list = gt_dict[gt_]
url_list, gt_list, pl_list = [], [], []
for url_prob in url_prob_list:
url, gt_prob, pl_prob = url_prob
gt_list.append(gt_prob)
pl_list.append(pl_prob)
url_list.append(url)
pl_list, gt_list, url_list = sort_three_list(pl_list, gt_list, url_list, reverse=True)
gt_list_dict[gt_] = [pl_list, gt_list, url_list]
for gt_ in gt_list_dict.keys():
gt_str = label_str_list[gt_]
pl_str = label_str_list[pl_]
out_html = os.path.join(out_dir, "pl{}_gt{}.html".format(pl_str, gt_str))
pl_list, gt_list, url_list = gt_list_dict[gt_]
html_items = [[url, pl_str, pl_prob, gt_str, gt_prob]
for gt_prob, pl_prob, url in zip(gt_list, pl_list, url_list)]
make_html_page(out_html, html_items)
print('[Info] ' + "-" * 100)
@staticmethod
def pr_curves(items_list):
target_list = []
target_idx = 0
positive_num = 0
for items in items_list:
url = items[0]
gt = int(items[1])
prob_list = [float(i) for i in items[2:]]
pl = np.argmax(prob_list)
tar_prob, other_prob = 0, 0
for prob_idx, prob in enumerate(prob_list):
if prob_idx == target_idx:
tar_prob += prob
else:
other_prob += prob
tar_label = 0 if gt == target_idx else 1
positive_num += 1 if tar_label == 0 else 0
# print('[Info] tar_label: {}, tar_prob: {}, other_prob: {}'.format(tar_label, tar_prob, other_prob))
target_list.append([tar_label, tar_prob, other_prob])
        print('[Info] Samples: {}, positives: {}'.format(len(target_list), positive_num))
prob_list = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99]
for prob in prob_list:
recall_num = 0
precision_num = 0
x_num = 0
for items in target_list:
tar_label = items[0]
tar_prob = items[1]
pl = 0 if tar_prob >= prob else 1
if pl == 0 and tar_label == 0:
x_num += 1
if tar_label == 0:
recall_num += 1
if pl == 0:
precision_num += 1
recall = x_num / recall_num
precision = x_num / precision_num
print('[Info] prob: {}, recall: {}'.format(prob, recall))
print('[Info] prob: {}, precision: {}'.format(prob, precision))
@staticmethod
def process_results():
# in_file = os.path.join(DATA_DIR, "files_text_line_v2", "val_file_4k_out.20211021183903.txt")
# in_file = os.path.join(DATA_DIR, "files_text_line_v2", "val_file_4k_v2_out.20211021182532.txt")
# in_file = os.path.join(DATA_DIR, "files_text_line_v2", "val_file_4k_v2_out_m2.20211022174430.txt")
in_file = os.path.join(DATA_DIR, "files_text_line_v2", "val_file_4k_v2_out_batch.20211026155530.txt")
items_list = ModelEvaluator.get_results_data(in_file)
        label_str_list = ["printed text", "handwritten text", "art text", "no text"]
        ModelEvaluator.confusion_matrix(items_list, label_str_list)  # compute the confusion matrix
        # ModelEvaluator.pr_curves(items_list)  # compute precision/recall at fixed thresholds
@staticmethod
def process():
        # Step 1: prepare the dataset
        # ModelEvaluator.prepare_dataset()
        # Step 2: run predictions over the dataset
        # ModelEvaluator.predict_dataset()
        # Step 3: process the test results
ModelEvaluator.process_results()
def main():
de = ModelEvaluator()
de.process()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
from Crypto.Hash import SHA256
from Crypto.Signature import PKCS1_v1_5
# from Crypto.PublicKey import RSA
def apply_sig(private_key, input):
digest = SHA256.new()
digest.update(input.encode('utf-8'))
signer = PKCS1_v1_5.new(private_key)
return signer.sign(digest)
def verify_sig(private_key, data, signature):
verifier = PKCS1_v1_5.new(private_key.publickey())
digest = SHA256.new()
digest.update(data.encode('utf-8'))
return verifier.verify(digest, signature)
def get_string_from_key(key):
return key.exportKey().hex()
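# Illustrative round trip (a minimal sketch, not part of the original module; it
# assumes PyCryptodome is installed and that generating a throwaway 2048-bit key
# is acceptable for a quick self-test).
if __name__ == '__main__':
    from Crypto.PublicKey import RSA
    key = RSA.generate(2048)
    signature = apply_sig(key, 'hello world')
    assert verify_sig(key, 'hello world', signature)
    print(get_string_from_key(key.publickey())[:64] + '...')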
|
''' This empty module is a placeholder for identifier and directory management tools.
'''
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import block
class Pairwise(nn.Module):
def __init__(self,
residual=True,
fusion_coord={},
fusion_feat={},
agg={}):
super(Pairwise, self).__init__()
self.residual = residual
self.fusion_coord = fusion_coord
self.fusion_feat = fusion_feat
self.agg = agg
#
if self.fusion_coord:
self.f_coord_module = block.factory_fusion(self.fusion_coord)
if self.fusion_feat:
self.f_feat_module = block.factory_fusion(self.fusion_feat)
#
self.buffer = None
def set_buffer(self):
self.buffer = {}
def forward(self, mm, coords=None):
bsize = mm.shape[0]
nregion = mm.shape[1]
Rij = 0
if self.fusion_coord:
assert coords is not None
coords_l = coords[:,:,None,:]
coords_l = coords_l.expand(bsize,nregion,nregion,coords.shape[-1])
coords_l = coords_l.contiguous()
coords_l = coords_l.view(bsize*nregion*nregion,coords.shape[-1])
coords_r = coords[:,None,:,:]
coords_r = coords_r.expand(bsize,nregion,nregion,coords.shape[-1])
coords_r = coords_r.contiguous()
coords_r = coords_r.view(bsize*nregion*nregion,coords.shape[-1])
Rij += self.f_coord_module([coords_l, coords_r])
if self.fusion_feat:
mm_l = mm[:,:,None,:]
mm_l = mm_l.expand(bsize,nregion,nregion,mm.shape[-1])
mm_l = mm_l.contiguous()
mm_l = mm_l.view(bsize*nregion*nregion,mm.shape[-1])
mm_r = mm[:,None,:,:]
mm_r = mm_r.expand(bsize,nregion,nregion,mm.shape[-1])
mm_r = mm_r.contiguous()
mm_r = mm_r.view(bsize*nregion*nregion,mm.shape[-1])
Rij += self.f_feat_module([mm_l, mm_r])
Rij = Rij.view(bsize,nregion,nregion,-1)
if self.agg['type'] == 'max':
mm_new, argmax = Rij.max(2)
else:
mm_new = getattr(Rij, self.agg['type'])(2)
if self.buffer is not None:
self.buffer['mm'] = mm.data.cpu() # bx36x2048
            self.buffer['mm_new'] = mm_new.data.cpu() # bx36x2048
self.buffer['argmax'] = argmax.data.cpu() # bx36x2048
L1_regions = torch.norm(mm_new.data, 1, 2) # bx36
L2_regions = torch.norm(mm_new.data, 2, 2) # bx36
self.buffer['L1_max'] = L1_regions.max(1)[0].cpu() # b
self.buffer['L2_max'] = L2_regions.max(1)[0].cpu() # b
if self.residual:
mm_new += mm
return mm_new
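# Shape conventions used above (reference note, not part of the original module):
# `mm` is a region-feature tensor of shape (batch, n_regions, feat_dim) and `coords`,
# when given, has shape (batch, n_regions, coord_dim). Every ordered pair of regions
# is fused, then the pairwise scores are aggregated over the second region axis.
# A hypothetical call, with a fusion config whose exact keys depend on the external
# `block` library and are therefore not spelled out here:
#     pairwise = Pairwise(fusion_feat=some_block_fusion_config, agg={'type': 'max'})
#     mm_new = pairwise(mm)   # -> (batch, n_regions, output_dim)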
|
#!/usr/bin/python
#
# Copyright 2021, Xcalar Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Eric D. Cohen
import argparse
from collections import Counter
import csv
import subprocess as sp
import re
import sys
import os
argParser = argparse.ArgumentParser()
argParser.add_argument('-b', dest='binary', required=False,
help='Path to binary run under guard rails')
argParser.add_argument('-c', dest='sortct', required=False, action='store_true',
help='Sort by leak count instead total memory leaked')
argParser.add_argument('-f', dest='fin', required=True,
help='GuardRails leak dump CSV file')
argParser.add_argument('-t', dest='top', required=False, type=int, default=sys.maxsize,
help='Only show up to this many top contexts')
args = argParser.parse_args()
class grDump(object):
def __init__(self):
self.data = []
self.leaks = []
# XXX: Needs to be an array of shared lib bases...
self.baseAddr = 0
self.myDir = os.path.dirname(os.path.realpath(__file__))
try:
with open(self.myDir + '/blist.txt', 'r') as fh:
self.blist = fh.read().splitlines()
except IOError:
self.blist = []
def loadData(self):
with open(args.fin) as fh:
self.data = fh.read().splitlines()
self.baseAddr = int(self.data.pop(0).split('-')[0], 16)
print("Program base address {:#x}".format(self.baseAddr))
def resolveSym(self, addr):
addrProc = sp.Popen("addr2line -Cfse " + args.binary + " " + str(addr), shell=True, stdout=sp.PIPE)
return filter(lambda x: x, addrProc.stdout.read().split('\n'))
def resolveSyms(self, addrs):
# addr2line is surprisingly slow; resolves about 6 backtraces/sec
addrProc = sp.Popen("addr2line -Capfse " + args.binary + " " + str(addrs), shell=True, stdout=sp.PIPE)
return addrProc.stdout.read()
def parseLeaks(self):
ctr = Counter(self.data)
leakFreq = ctr.items()
leakFreq.sort(key=lambda x: x[1], reverse=True)
leakFreq = [str(x[1]) + "," + x[0] for x in leakFreq]
csvReader = csv.reader(leakFreq, delimiter=',')
skipped = 0
while True:
try:
row = csvReader.next()
except csv.Error:
skipped += 1
continue
except StopIteration:
break
leak = filter(lambda x: x, row)
count = int(leak[0])
elmBytes = int(leak[1])
totBytes = count * elmBytes
self.leaks.append({'count': count, 'elmBytes': elmBytes, 'totBytes': totBytes, 'backtrace': leak[2:]})
if skipped:
# Error rows are likely due to either a known bug in libunwind or
# GuardRail's current naughty use of SIGUSR2. They are rare enough
# it shouldn't really matter for leak tracking purposes...
print("Skipped %d erroneous leak record" % skipped)
def printLeaks(self):
self.parseLeaks()
totalBytesLeaked = 0
totalLeakCount = 0
numContexts = len(self.leaks)
for leak in self.leaks:
totalBytesLeaked += leak['totBytes']
totalLeakCount += leak['count']
print "Leaked total of {:,d} bytes across {:,d} leaks from {:,d} contexts"\
.format(totalBytesLeaked, totalLeakCount, numContexts)
if args.sortct:
self.leaks.sort(key=lambda x: x['count'], reverse=True)
else:
self.leaks.sort(key=lambda x: x['totBytes'], reverse=True)
context = 0
outStr = ""
for leak in self.leaks:
leakStr = "================================ Context {:>6,d} / {:,d} ================================\n"\
.format(context, numContexts)
leakStr += "Leaked {:,d} bytes across {:,d} allocations of {:,d} bytes each:\n"\
.format(leak['totBytes'], leak['count'], leak['elmBytes'])
leakNum = 0
if args.binary:
# XXX: Need to pull in shared lib bases here.
abs_addrs = [hex(int(x, 16) - self.baseAddr) for x in leak['backtrace']]
syms = self.resolveSyms(' '.join(abs_addrs))
skipLeak = False
for sym in syms.split('\n'):
if not sym:
continue
shortSym = re.sub(r'\(.*?\)', r'', sym)
for b in self.blist:
if re.search(b, shortSym):
skipLeak = True
leakStr += "#{: <2} {}\n".format(leakNum, shortSym)
leakNum += 1
if skipLeak:
continue
else:
outStr += leakStr
else:
for addr in leak['backtrace']:
outStr += "#{: <2} {} (No symbols, see -b option)\n".format(leakNum, addr)
leakNum += 1
context += 1
if context >= args.top:
break
print outStr
dumper = grDump()
dumper.loadData()
dumper.printLeaks()
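# Example invocation (hypothetical file names, shown for illustration only):
#   ./grLeakDump.py -f leaks.csv -b /path/to/instrumented-binary -t 20
# Use -c to sort contexts by leak count instead of total bytes leaked; without -b
# the backtraces are printed as raw addresses.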
|
import botocore
import boto3
import os
from os import walk
from tqdm import tqdm
import gzip
if not os.path.exists('cache'):
os.makedirs('cache')
s3 = boto3.resource('s3')
client = boto3.client('s3')
bucket = s3.Bucket('pytorch')
print('Downloading log files')
for key in tqdm(bucket.objects.filter(Prefix='cflogs')):
# print(key.key)
remote_fname = key.key
local_fname = os.path.join('cache', remote_fname)
if not os.path.exists(local_fname):
dirname = os.path.dirname(local_fname)
if not os.path.exists(dirname):
os.makedirs(dirname)
client.download_file("pytorch", remote_fname, local_fname)
size_cache = dict()
def get_size(name):
if name[0] == '/':
name = name[1:]
if name not in size_cache:
for key in bucket.objects.filter(Prefix=name):
size_cache[name] = key.size
return size_cache[name]
valid_cache = dict()
def is_valid(name):
if name not in valid_cache:
exists = False
try:
s3.Object('pytorch', name).load()
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
exists = False
elif e.response['Error']['Code'] == "400":
print(name)
exists = False
else:
raise
else:
exists = True
valid_cache[name] = exists
return valid_cache[name]
# parse all files, read each line, add up all the bytes sizes
print('parsing log files')
bytes_cache = dict()
for (dirpath, dirnames, filenames) in walk('cache/cflogs/'):
for filename in tqdm(filenames):
f = gzip.open(os.path.join(dirpath, filename), 'r')
string = f.read().decode("utf-8")
f.close()
entries = string.splitlines()[2:]
for entry in entries:
columns = entry.split('\t')
filename = columns[7]
if filename[0] == '/':
filename = filename[1:]
bytes_sent = columns[3]
if filename not in bytes_cache:
bytes_cache[filename] = 0
bytes_cache[filename] += int(bytes_sent)
print('Filtering invalid entries')
final_list = dict()
for k, v in tqdm(bytes_cache.items()):
if '.whl' in k and is_valid(k):
final_list[k] = v
print('Counting downloads (bytes sent / filesize)')
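# Rough arithmetic behind the estimate (illustrative numbers): a wheel of, say,
# 800 MB that accounts for ~3.2 GB of bytes sent in the logs counts as ~4 downloads;
# partial transfers contribute fractionally.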
total_downloads = 0
for k, v in final_list.items():
sz = get_size(k)
downloads = v / sz
print(k, round(downloads))
total_downloads += downloads
print('')
print('')
print('Total PyTorch wheel downloads: ', round(total_downloads))
print('')
print('')
|
from django import forms
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator
from django.contrib.auth import password_validation, forms as auth_forms
from django.utils.translation import gettext, gettext_lazy as _
from app.models import User
class StringListField(forms.CharField):
def prepare_value(self, value):
if not value:
return ''
return str(value)
def to_python(self, value):
if not value:
return []
return [item.strip() for item in value.split(',')]
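# Behaviour of StringListField in brief (reference note): to_python('0.1, 0.01')
# returns ['0.1', '0.01'] (a stripped, comma-split list), while prepare_value()
# renders whatever is stored back via str() for redisplay in the widget.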
class SimCreationForm(forms.Form):
model = forms.FileField(
label='Model',
widget=forms.FileInput(
attrs={
'class': 'form-control'
}
)
)
name = forms.CharField(
max_length=100,
label='Name',
widget=forms.TextInput(
attrs={
'autofocus': True,
'class': 'form-control'
}
)
)
max_epochs = forms.IntegerField(
validators=[MinValueValidator(1)],
label='Total epochs',
widget=forms.NumberInput(
attrs={
'class': 'form-control',
}
)
)
logging_interval = forms.IntegerField(
validators=[MinValueValidator(1)],
label='Epoch period',
widget=forms.NumberInput(
attrs={
'class': 'form-control'
}
)
)
batch_size = forms.IntegerField(
validators=[MinValueValidator(1)],
label='Batch size',
widget=forms.NumberInput(
attrs={
'class': 'form-control'
}
)
)
learning_rate = StringListField(
label='Learning rate',
widget=forms.TextInput(
attrs={
'class': 'form-control',
'style': 'height: 0; margin: 0; border: 0; padding: 0;',
'tabindex': '-1',
'readonly': '',
}
)
)
metrics = forms.MultipleChoiceField(
label='Extra metrics',
widget=forms.CheckboxSelectMultiple(
attrs={
'class': 'form-check-input',
}
),
choices=(
('AUC', 'AUC'),
('Accuracy', 'Accuracy'),
('BinaryAccuracy', 'BinaryAccuracy'),
('BinaryCrossentropy', 'BinaryCrossentropy'),
('CategoricalAccuracy', 'CategoricalAccuracy'),
('CategoricalCrossentropy', 'CategoricalCrossentropy'),
('CategoricalHinge', 'CategoricalHinge'),
('CosineSimilarity', 'CosineSimilarity'),
('FalseNegatives', 'FalseNegatives'),
('FalsePositives', 'FalsePositives'),
('Hinge', 'Hinge'),
('KLDivergence', 'KLDivergence'),
('LogCoshError', 'LogCoshError'),
('Mean', 'Mean'),
('MeanAbsoluteError', 'MeanAbsoluteError'),
('MeanAbsolutePercentageError', 'MeanAbsolutePercentageError'),
('MeanIoU', 'MeanIoU'),
('MeanRelativeError', 'MeanRelativeError'),
('MeanSquaredError', 'MeanSquaredError'),
('MeanSquaredLogarithmicError', 'MeanSquaredLogarithmicError'),
('MeanTensor', 'MeanTensor'),
('Poisson', 'Poisson'),
('Precision', 'Precision'),
('PrecisionAtRecall', 'PrecisionAtRecall'),
('Recall', 'Recall'),
('RecallAtPrecision', 'RecallAtPrecision'),
('RootMeanSquaredError', 'RootMeanSquaredError'),
('SensitivityAtSpecificity', 'SensitivityAtSpecificity'),
('SparseCategoricalAccuracy', 'SparseCategoricalAccuracy'),
('SparseCategoricalCrossentropy', 'SparseCategoricalCrossentropy'),
('SparseTopKCategoricalAccuracy', 'SparseTopKCategoricalAccuracy'),
('SpecificityAtSensitivity', 'SpecificityAtSensitivity'),
('SquaredHinge', 'SquaredHinge'),
('Sum', 'Sum'),
('TopKCategoricalAccuracy', 'TopKCategoricalAccuracy'),
('TrueNegatives', 'TrueNegatives'),
('TruePositives', 'TruePositives'),
),
required=False
)
metrics_conf = StringListField(
label='Metrics configs',
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'style': 'display: none;',
'readonly': '',
}
)
)
optimizer = forms.MultipleChoiceField(
label='Optimizers',
widget=forms.CheckboxSelectMultiple(
attrs={
'class': 'form-check-input',
}),
choices=(
('Adadelta', 'Adadelta'),
('Adagrad', 'Adagrad'),
('Adam', 'Adam'),
('Adamax', 'Adamax'),
('Ftrl', 'Ftrl'),
('Nadam', 'Nadam'),
('RMSprop', 'RMSprop'),
('SGD', 'SGD'),
)
)
optimizer_conf = StringListField(
label='Optimizers configs',
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'style': 'display: none;',
'readonly': '',
}
)
)
loss_function = forms.MultipleChoiceField(
label='Loss functions',
widget=forms.CheckboxSelectMultiple(
attrs={
'class': 'form-check-input',
}),
choices=(
('BinaryCrossentropy', 'BinaryCrossentropy'),
('CategoricalCrossentropy', 'CategoricalCrossentropy'),
('CategoricalHinge', 'CategoricalHinge'),
('CosineSimilarity', 'CosineSimilarity'),
('Hinge', 'Hinge'),
('Huber', 'Huber'),
('KLDivergence', 'KLDivergence'),
('LogCosh', 'LogCosh'),
('MeanAbsoluteError', 'MeanAbsoluteError'),
('MeanAbsolutePercentageError', 'MeanAbsolutePercentageError'),
('MeanSquaredError', 'MeanSquaredError'),
('MeanSquaredLogarithmicError', 'MeanSquaredLogarithmicError'),
('Poisson', 'Poisson'),
('Reduction', 'Reduction'),
('SparseCategoricalCrossentropy', 'SparseCategoricalCrossentropy'),
('SquaredHinge', 'SquaredHinge'),
)
)
loss_function_conf = StringListField(
label='Loss functions configs',
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'style': 'display: none;',
'readonly': '',
}
)
)
is_k_fold = forms.BooleanField(
label='K-Fold cross-validation',
required=False,
widget=forms.CheckboxInput(
attrs={
'class': 'form-check-input',
}
)
)
k_fold_validation = forms.IntegerField(
label='K-Fold splits',
validators=[MinValueValidator(2)],
required=False,
widget=forms.NumberInput(
attrs={
'class': 'form-control'
}
)
)
tag = forms.CharField(
label='K-Fold tag',
max_length=200,
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control'
}
)
)
use_url_datasets = forms.BooleanField(
label='Use links for the datasets',
required=False,
widget=forms.CheckboxInput(
attrs={
'class': 'form-check-input',
}
)
)
train_dataset = forms.FileField(
label='Training dataset',
required=False,
widget=forms.FileInput(
attrs={
'class': 'form-control'
}
)
)
url_train_dataset = forms.URLField(
max_length=255,
label='Training dataset',
required=False,
widget=forms.URLInput(
attrs={
'class': 'form-control'
}
)
)
test_dataset = forms.FileField(
label='Test dataset',
required=False,
widget=forms.FileInput(
attrs={
'class': 'form-control'
}
)
)
url_test_dataset = forms.URLField(
max_length=255,
label='Test dataset',
required=False,
widget=forms.URLInput(
attrs={
'class': 'form-control'
}
)
)
val_dataset = forms.FileField(
label='Validation dataset',
required=False,
widget=forms.FileInput(
attrs={
'class': 'form-control'
}
)
)
url_val_dataset = forms.URLField(
max_length=255,
label='Validation dataset',
required=False,
widget=forms.URLInput(
attrs={
'class': 'form-control'
}
)
)
dataset_format = forms.ChoiceField(
label='Format used by the datasets',
widget=forms.Select(),
choices=(
('npz', '.npz'),
('csv', '.csv'),
('pickle', '.pickle (pandas)'),
('zip', '.zip (pandas)'),
('arff', '.arff'),
('json', '.json'),
)
)
label_column = forms.CharField(
label='Label column',
max_length=200,
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control'
}
)
)
train_feature_name = forms.CharField(
label='Training feature name',
max_length=200,
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control'
}
)
)
train_label_name = forms.CharField(
label='Training label name',
max_length=200,
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control'
}
)
)
test_feature_name = forms.CharField(
label='Test feature name',
max_length=200,
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control'
}
)
)
test_label_name = forms.CharField(
label='Test label name',
max_length=200,
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control'
}
)
)
val_feature_name = forms.CharField(
label='Validation feature name',
max_length=200,
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control'
}
)
)
val_label_name = forms.CharField(
label='Validation label name',
max_length=200,
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control'
}
)
)
extra_tags = StringListField(
label='Tags',
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'style': 'display: none;',
'readonly': '',
}
)
)
def clean_logging_interval(self):
cleaned_data = super().clean()
clean_logging_interval = cleaned_data.get("logging_interval")
if clean_logging_interval > cleaned_data.get("max_epochs"):
raise ValidationError("Logging interval is larger than total runtime")
return clean_logging_interval
def clean_k_fold_validation(self):
cleaned_data = super().clean()
clean_k_fold_validation = cleaned_data.get("k_fold_validation")
if cleaned_data.get("is_k_fold") and clean_k_fold_validation is None:
raise ValidationError("K-Fold needs to have splits set")
return clean_k_fold_validation
def clean_tag(self):
cleaned_data = super().clean()
clean_tag = cleaned_data.get("tag")
if cleaned_data.get("is_k_fold") and (clean_tag is None or str(clean_tag).strip() == ''):
raise ValidationError("K-Fold needs to have tag set")
return clean_tag
# def clean_train_dataset(self):
# cleaned_data = super().clean()
# clean_train_dataset = cleaned_data.get("train_dataset")
# if not cleaned_data.get("use_url_datasets"):
# if clean_train_dataset is None:
# raise ValidationError("Training dataset is required")
# if clean_train_dataset.name.split(".")[-1] != cleaned_data.get("dataset_format"):
# raise ValidationError("Please select the correct format below")
# return clean_train_dataset
def clean_url_train_dataset(self):
cleaned_data = super().clean()
clean_url_train_dataset = cleaned_data.get("url_train_dataset")
if cleaned_data.get("use_url_datasets") and (clean_url_train_dataset is None or str(clean_url_train_dataset).strip() == ''):
raise ValidationError("Training dataset is required")
return clean_url_train_dataset
# def clean_test_dataset(self):
# cleaned_data = super().clean()
# clean_test_dataset = cleaned_data.get("test_dataset")
# if not cleaned_data.get("use_url_datasets"):
# if clean_test_dataset is None:
# raise ValidationError("Test dataset is required")
# if clean_test_dataset.name.split(".")[-1] != cleaned_data.get("dataset_format"):
# raise ValidationError("Please select the correct format below")
# return clean_test_dataset
def clean_url_test_dataset(self):
cleaned_data = super().clean()
clean_url_test_dataset = cleaned_data.get("url_test_dataset")
if cleaned_data.get("use_url_datasets") and (clean_url_test_dataset is None or str(clean_url_test_dataset).strip() == ''):
raise ValidationError("Test dataset is required")
return clean_url_test_dataset
# def clean_val_dataset(self):
# cleaned_data = super().clean()
# clean_val_dataset = cleaned_data.get("val_dataset")
# if not cleaned_data.get("use_url_datasets"):
# if clean_val_dataset is None:
# raise ValidationError("Validation dataset is required")
# if clean_val_dataset.name.split(".")[-1] != cleaned_data.get("dataset_format"):
# raise ValidationError("Please select the correct format below")
# return clean_val_dataset
def clean_url_val_dataset(self):
cleaned_data = super().clean()
clean_url_val_dataset = cleaned_data.get("url_val_dataset")
if cleaned_data.get("use_url_datasets") and (clean_url_val_dataset is None or str(clean_url_val_dataset).strip() == ''):
raise ValidationError("Validation dataset is required")
return clean_url_val_dataset
def clean_label_column(self):
cleaned_data = super().clean()
clean_label_column = cleaned_data.get("label_column")
if cleaned_data.get("dataset_format") == "csv" and (clean_label_column is None or str(clean_label_column).strip() == ''):
raise ValidationError("Label column is required")
return clean_label_column
def clean_train_feature_name(self):
cleaned_data = super().clean()
clean_train_feature_name = cleaned_data.get("train_feature_name")
if cleaned_data.get("dataset_format") == "npz" and (clean_train_feature_name is None or str(clean_train_feature_name).strip() == ''):
raise ValidationError("Training feature name is required")
return clean_train_feature_name
def clean_train_label_name(self):
cleaned_data = super().clean()
clean_train_label_name = cleaned_data.get("train_label_name")
if cleaned_data.get("dataset_format") == "npz" and (clean_train_label_name is None or str(clean_train_label_name).strip() == ''):
raise ValidationError("Training label name is required")
return clean_train_label_name
def clean_test_feature_name(self):
cleaned_data = super().clean()
clean_test_feature_name = cleaned_data.get("test_feature_name")
if cleaned_data.get("dataset_format") == "npz" and (clean_test_feature_name is None or str(clean_test_feature_name).strip() == ''):
raise ValidationError("Test feature name is required")
return clean_test_feature_name
def clean_test_label_name(self):
cleaned_data = super().clean()
clean_test_label_name = cleaned_data.get("test_label_name")
if cleaned_data.get("dataset_format") == "npz" and (clean_test_label_name is None or str(clean_test_label_name).strip() == ''):
raise ValidationError("Test label name is required")
return clean_test_label_name
def clean_val_feature_name(self):
cleaned_data = super().clean()
clean_val_feature_name = cleaned_data.get("val_feature_name")
if cleaned_data.get("dataset_format") == "npz" and (clean_val_feature_name is None or str(clean_val_feature_name).strip() == ''):
raise ValidationError("Validation feature name is required")
return clean_val_feature_name
def clean_val_label_name(self):
cleaned_data = super().clean()
clean_val_label_name = cleaned_data.get("val_label_name")
if cleaned_data.get("dataset_format") == "npz" and (clean_val_label_name is None or str(clean_val_label_name).strip() == ''):
raise ValidationError("Validation label name is required")
return clean_val_label_name
class ConfigFileSimCreationForm(forms.Form):
config = forms.FileField(
label='Config',
widget=forms.FileInput(
attrs={
'class': 'form-control',
'id': 'file_id_config'
}
)
)
model = forms.FileField(
label='Model',
widget=forms.FileInput(
attrs={
'class': 'form-control',
'id': 'file_id_model'
}
)
)
use_url_datasets = forms.BooleanField(
label='Use links for the datasets',
required=False,
widget=forms.CheckboxInput(
attrs={
'class': 'form-check-input',
'id': 'file_use_url_datasets',
}
)
)
train_dataset = forms.FileField(
label='Training dataset',
required=False,
widget=forms.FileInput(
attrs={
'class': 'form-control',
'id': 'file_id_train_dataset'
}
)
)
test_dataset = forms.FileField(
label='Test dataset',
required=False,
widget=forms.FileInput(
attrs={
'class': 'form-control',
'id': 'file_id_test_dataset'
}
)
)
val_dataset = forms.FileField(
label='Validation dataset',
required=False,
widget=forms.FileInput(
attrs={
'class': 'form-control',
'id': 'file_id_val_dataset'
}
)
)
def clean_train_dataset(self):
cleaned_data = super().clean()
clean_train_dataset = cleaned_data.get("train_dataset")
if (not cleaned_data.get("use_url_datasets")) and clean_train_dataset is None:
raise ValidationError("Training dataset is required")
return clean_train_dataset
def clean_test_dataset(self):
cleaned_data = super().clean()
clean_test_dataset = cleaned_data.get("test_dataset")
if (not cleaned_data.get("use_url_datasets")) and clean_test_dataset is None:
raise ValidationError("Test dataset is required")
return clean_test_dataset
def clean_val_dataset(self):
cleaned_data = super().clean()
clean_val_dataset = cleaned_data.get("val_dataset")
if (not cleaned_data.get("use_url_datasets")) and clean_val_dataset is None:
raise ValidationError("Validation dataset is required")
return clean_val_dataset
class CustomAuthenticationForm(auth_forms.AuthenticationForm):
def __init__(self, *args, **kwargs):
super(CustomAuthenticationForm, self).__init__(*args, **kwargs)
username = auth_forms.UsernameField(
widget=forms.TextInput(
attrs={
'autofocus': True,
'class': 'form-control'
}
)
)
password = forms.CharField(
label=_("Password"),
strip=False,
widget=forms.PasswordInput(
attrs={
'autocomplete': 'current-password',
'class': 'form-control'
}
),
)
class CustomUserCreationForm(auth_forms.UserCreationForm):
def __init__(self, *args, **kwargs):
super(CustomUserCreationForm, self).__init__(*args, **kwargs)
password1 = forms.CharField(
label=_("Password"),
strip=False,
widget=forms.PasswordInput(
attrs={
'autocomplete': 'new-password',
'class': 'form-control'
}
),
help_text="\n".join(password_validation.password_validators_help_texts()),
)
password2 = forms.CharField(
label=_("Password confirmation"),
widget=forms.PasswordInput(
attrs={
'autocomplete': 'new-password',
'class': 'form-control'
}
),
strip=False,
help_text=_("Enter the same password as before, for verification."),
)
class Meta:
model = User
fields = ("username",)
field_classes = {'username': auth_forms.UsernameField}
widgets = {'username': forms.TextInput(
attrs={
'class': 'form-control'
}
)}
|
"""
project: Load Flow Calculation
author: @魏明江
time: 2020/02/22
attention:readData.py定义了数据类完成了从txt读取文档,然后建立节点导纳矩阵的过程
Data类包含的属性有 :path, input_file_list, admittance_matrix分别是
源文件路径,读取完成并经过数据转换后的输入列表,以及节点导纳矩阵
Data类包含的可用方法有read_data(self), get_admittance_matrix(self)
分别是读取并转换数据和计算节点导纳矩阵
"""
class Data:
def __init__(self, path_admittance, path_power):
self.path_admittance = path_admittance
self.path_power = path_power
self.bus_type = {}
self.input_file_list_admittance = self.read_admittance_data()
self.input_file_list_power = self.read_power_data()
        self.shape = len(self.input_file_list_power)
        rename_data = self.rename_bus()  # buses are renumbered, so build index lookup dicts
        self.no2no = rename_data[0]
        self.reno2no = rename_data[2]
        self.num_of_pq = rename_data[1]
        matrices = self.get_admittance_matrix()  # build both matrices in one pass
        self.admittance_matrix = matrices['data_array']
        self.pq_jacobian_matrix = matrices['b_array']
        self.power_data = self.get_power_data()
    # read the admittance input file into a list
def read_admittance_data(self):
data_txt = []
with open(self.path_admittance, 'r', encoding='utf-8') as data_file:
for each_line in data_file.readlines():
data_each_line = each_line.split(' ')
                # convert string fields to the appropriate types
try:
for i in [1, 3]:
data_each_line[i] = int(data_each_line[i])
for i in range(4, 8):
if data_each_line[i] != '/' and data_each_line[i] != '/\n':
data_each_line[i] = float(data_each_line[i])
else:
data_each_line[i] = '/'
except ValueError:
print('wrong input format!')
                # conversion done; append the parsed row
data_txt.append(data_each_line)
return data_txt
    # read the power input file into a list
def read_power_data(self):
data_txt = []
with open(self.path_power, 'r', encoding='utf-8') as data_file:
for each_line in data_file.readlines():
data_each_line = each_line.split(' ')
data_each_line[-1] = data_each_line[-1].replace('\n', '')
try:
data_each_line[1] = int(data_each_line[1])
for i in range(2, 10):
if data_each_line[i] not in ['/', '/\n', 'l']:
data_each_line[i] = float(data_each_line[i])
except ValueError:
print('wrong input format!')
                # conversion done; append the parsed row
data_txt.append(data_each_line)
data_txt.sort(key=lambda x: (x[1]))
return data_txt
    # renumber the buses (PQ buses first, then PV buses, slack bus last)
def rename_bus(self):
pq_bus = []
pv_bus = []
slack_bus = 0
input_power_data = self.input_file_list_power
input_admittance_data = self.input_file_list_admittance
for branch in input_power_data:
i = branch[1]
            if branch[7] != '/':  # slack (reference) bus
slack_bus = i
            elif branch[6] != '/':  # PV bus
pv_bus.append(i)
            else:  # PQ bus
pq_bus.append(i)
no2no = {}
reno2no = {}
search_list = pq_bus + pv_bus + [slack_bus]
value = 0
for i in search_list:
no2no[i] = value
reno2no[value] = i
input_power_data[i-1][1] = value
value += 1
input_power_data.sort(key=lambda x: (x[1]))
for branch in input_admittance_data:
branch[1] = no2no[branch[1]] + 1
branch[3] = no2no[branch[3]] + 1
return [no2no, len(pq_bus), reno2no]
    # convert the input list into the nodal admittance matrix and the B' matrix used by the fast-decoupled load flow
def get_admittance_matrix(self):
import numpy as np
        # initialize the nodal admittance matrix as a zero matrix
data_array = np.zeros((self.shape, self.shape), dtype=complex)
b_array = np.zeros((self.shape-1, self.shape-1), dtype=float)
        input_data = self.input_file_list_admittance  # the parsed admittance input list
        # assemble the nodal admittance matrix branch by branch
for branch in input_data:
i = branch[1] - 1
j = branch[3] - 1
if i < self.shape-1 and j < self.shape-1:
b_array[i][j] = 1.0 / branch[5]
b_array[j][i] = 1.0 / branch[5]
b_array[i][i] -= 1.0 / branch[5]
b_array[j][j] -= 1.0 / branch[5]
elif i == self.shape-1:
b_array[j][j] -= 1.0 / branch[5]
else:
b_array[i][i] -= 1.0 / branch[5]
            if branch[3] == 0:  # branch to ground
data_array[i][i] += 1.0/complex(branch[4], branch[5])
            elif branch[7] != '/':  # transformer branch (off-nominal tap in branch[7])
data_array[i][i] += 1.0/complex(branch[4], branch[5])
data_array[j][j] += 1.0/((branch[7]**2) * complex(branch[4], branch[5]))
mutual_admittance = 1/(branch[7] * complex(branch[4], branch[5]))
data_array[i][j] -= mutual_admittance
data_array[j][i] -= mutual_admittance
else:
self_admittance = complex(0, branch[6]) + 1.0/complex(branch[4], branch[5])
data_array[i][i] += self_admittance
data_array[j][j] += self_admittance
mutual_admittance = 1.0/complex(branch[4], branch[5])
data_array[i][j] -= mutual_admittance
data_array[j][i] -= mutual_admittance
                # nodal admittance matrix assembly complete
return {'data_array': data_array, 'b_array': b_array}
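    # Assembly rule applied above (reference note): a branch with series admittance
    # y = 1/Z between buses i and j (with shunt susceptance b where given) adds
    #     Y[i][i] += y + jb,   Y[j][j] += y + jb,   Y[i][j] = Y[j][i] -= y,
    # a branch to ground only adds y to its own diagonal, and for an off-nominal-tap
    # transformer (tap k = branch[7]) the i-side diagonal gets y, the j-side diagonal
    # gets y / k**2, and the mutual term is y / k.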
    # read the input power data
def get_power_data(self):
input_power = self.input_file_list_power
known_pq = [0.0] * (self.shape-1 + self.num_of_pq)
voltage = [1.0] * self.shape
angle = [0.0] * self.shape
for branch in input_power:
if branch[1] == (self.shape-1):
                angle[branch[1]] = branch[7]  # reference (slack) bus angle
if branch[1] < self.shape-1:
                known_pq[branch[1]] = branch[2] - branch[4]  # net active power injection
if branch[1] < self.num_of_pq:
                    known_pq[self.shape - 1 + branch[1]] = branch[3] - branch[5]  # net reactive power injection
else:
                    voltage[branch[1]] = branch[6]  # fixed voltage magnitude (PV bus)
return {
'known_pq': known_pq,
'voltage': voltage,
'angle': angle,
}
if __name__ == '__main__':
data = Data(path_admittance='./data.txt', path_power='./data_power.txt')
for i in range(9):
for j in range(9):
k = data.admittance_matrix[i][j]
if k != 0:
print('Y{}{}:{}+j{}'.format(i+1,j+1,round(k.real, 4), round(k.imag, 4)))
print(data.pq_jacobian_matrix)
print(data.pq_jacobian_matrix[:data.num_of_pq, :data.num_of_pq])
"""print(data.input_file_list_admittance)
print(data.power_data['known_pq'])
print(data.power_data['voltage'])
print(data.power_data['angle'])
print(data.admittance_matrix)"""
|
import copy
import datetime
import gc
import json
import lmdb
import logging
import math
import matplotlib
import matplotlib.pyplot as plt
import multiprocessing
import numpy as np
import os
import pickle
import pprint
import subprocess
import struct
import sys
import tletools
from .model import DataModel
from .defs import *
from .utils import *
from .transformer import Jazz
from .cache import GabbyCache
class GabbyPlotContext(object):
"""Easily serializeable object with necessary data for plotting.
We break this out into a separate and object to make it easier to
use multiprocessing to process images. For convenience, we place
the logic to load the data in here. It also makes unit tests
easier as we don't have to entangle the logic for matplotlib with
the logic for the data.
"""
def __init__(self, tgt, data, output_dir, img_dir=None):
"""
tgt: configparser results for the plot section
data: FragmentData
fragments: [str, ...] list of fragment names
start_d: starting datetime
end_d: ending datetime
output_dir: just what it says
indexes: list of integer indexes for the images to build
"""
# Preserve the basic inputs
self.tgt = tgt
self.data = data
self.output_dir = output_dir
# We're going to store the images in a separate directory for
# cleanliness
if not img_dir: img_dir = os.path.join(self.output_dir, 'gabby-img')
self.img_dir = img_dir
# The colors are meant to be interpreted directly by matplotlib
self.apogee_color = self.tgt['apogee-color']
self.perigee_color = self.tgt['perigee-color']
# The datetime objects along the x-axis of the plot with the
# number of fragments.
self.Xt = np.arange(self.data.start_d,
self.data.end_d+self.data.dt,
self.data.dt)
self.Xts = np.arange(dt_to_ts(self.data.start_d),
dt_to_ts(self.data.end_d+self.data.dt),
self.data.dt.total_seconds())
# The date on which we start showing forward propagation
if 'fwd-prop-start-date' in self.tgt:
timestr = self.tgt['fwd-prop-start-date']
self.fwd_prop_start_dt = parse_date_d(timestr)
self.fwd_prop_start_ts = dt_to_ts(self.fwd_prop_start_dt)
for i in range(len(self.Xts)):
if self.Xts[i] == self.fwd_prop_start_ts:
self.fwd_prop_idx = i
break
elif self.Xts[i] > self.fwd_prop_start_ts:
self.fwd_prop_idx = i-1
break
else:
            self.fwd_prop_start_dt = None
self.fwd_prop_idx = None
def fetch_from_db(self, db):
"""Fetches any necessary data from the DB for the plot context.
This largely fetches the latest APT values for the
comparators. The rest of the data will have already been
fetched.
"""
# Basic read-only transaction
txn = db.txn()
# Loads the comparators
static_comparators = json.loads(self.tgt['static-comparators'])
logging.info(f" Using static comparators: {static_comparators}")
comparators = {}
for comp in static_comparators:
des, name, color, = comp
cur = {'name': name,
'color': color,
'apt': tuple(db.get_latest_apt(txn, des)),
'is_static': True
}
comparators[des] = cur
txn.commit()
self.comparators = comparators
def plt_setup(self):
"""Performs setup of matplotlib objects.
Matplotlib objects can't be serialized so they have to be
re-generated for each process.
"""
# The legend is shared by all and customized.
self.legend_contents = [
matplotlib.lines.Line2D([0], [0],
color='white',
markerfacecolor=self.apogee_color,
marker='.',
markersize=12),
matplotlib.lines.Line2D([0], [0],
color='white',
markerfacecolor=self.perigee_color,
marker='.',
markersize=12),
]
self.legend_labels = [
'Fragment Apogee',
'Fragment Perigee',
]
# Process the comparators
comp_X = []
comp_Y = []
comp_C = []
for des in self.comparators:
cur = self.comparators[des]
apt = cur['apt']
color = cur['color']
name = cur['name']
A,P,T, = apt
O = (A+P)/2
legend_patch = matplotlib.lines.Line2D([0], [0],
color='white',
markerfacecolor=color,
marker='o',
markersize=12)
self.legend_contents.append(legend_patch)
self.legend_labels.append(name)
comp_X.append(T)
comp_Y.append(O)
comp_C.append(color)
self.comp_X = comp_X
self.comp_Y = comp_Y
self.comp_C = comp_C
class GabbyPlotter(object):
def __init__(self,
cfg=None,
tgt=None,
img_dir=None,
output_dir=None,
cache_dir=None,
data=None,
db=None):
self.cfg = cfg
self.tgt = tgt
self.img_dir = img_dir
self.output_dir = output_dir
self.cache_dir = cache_dir
self.data = data
self.db = db
self.cache = GabbyCache(cache_dir) if cache_dir else None
def plot_prediction(self):
jazz = Jazz(self.cfg,
self.frag_env,
self.frag_apt,
self.frag_tle,
self.frag_scope)
frags, apt, deriv, N = jazz.derivatives(fltr=jazz.lpf(),
cache_dir=self.cache_dir)
(moral_decay,
bins_A, bins_P,
Ap, Ad, Pp, Pd,) = jazz.decay_rates(apt, deriv, N)
n_A_bins = self.cfg.getint('n-apogee-bins')
n_P_bins = self.cfg.getint('n-perigee-bins')
n_D_bins = self.cfg.getint('n-deriv-bins')
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1, 1, 1)
ax.hist(Ap, bins=n_A_bins+2)
fig.savefig(os.path.join(self.output_dir, 'Ap_hist.png'))
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1, 1, 1)
ax.hist(Pp, bins=n_P_bins+2)
fig.savefig(os.path.join(self.output_dir, 'Pp_hist.png'))
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1, 1, 1)
ax.hist(Ad, bins=n_D_bins+2)
fig.savefig(os.path.join(self.output_dir, 'Ad_hist.png'))
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1, 1, 1)
ax.hist(Pd, bins=n_D_bins+2)
fig.savefig(os.path.join(self.output_dir, 'Pd_hist.png'))
tmp = np.concatenate(Ad)
fig = plt.figure(figsize=(12, 8))
fig.set_dpi(self.tgt.getint('dpi'))
ax = fig.add_subplot(1, 1, 1)
tmp = np.sort(tmp)
N = len(tmp)
tmp = tmp[N//5:-1*(N//5)]
ax.hist(tmp, bins=100)
fig.savefig('output/wat.png')
sys.exit(0)
def plot_scope(self):
# Build the plot of debris pieces
Xt = [ts_to_dt(start_d)]
for i in range(last_idx): Xt.append(Xt[-1]+dt)
N = np.zeros(len(Xt))
idx = 0
for d in Xt:
n = 0
ts = int(d.timestamp())
for des in scope_start:
start = scope_start[des]
end = scope_end[des]
if start <= ts <= end: n += 1
N[idx] = n
idx += 1
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1, 1, 1)
ax.grid()
ax.plot(Xt, N)
img_path = os.path.join(self.output_dir, "num_fragments.png")
fig.savefig(img_path)
@classmethod
def _plt_setup(cls, ctx):
"""Perform initialization that doesn't serialize.
        This only needs to be done once per process.
"""
# We disable the interactive representation so we don't
# duplicate effort. This saves us a considerable amount of
# time during image generation.
plt.ioff()
# We get some pretty bad memory leaks with the default backend
# when used on mac. I haven't checked this for windows or
# linux, so be wary.
if 'darwin' in sys.platform: matplotlib.use('TkAgg')
# The cached PLT legend entries, etc need to be built on a
# per-process basis also.
ctx.plt_setup()
@classmethod
def _mp_gabby_plot(cls, ctx):
"""Multiprocessing method for gabby plots.
"""
# Since it's a new process, we'll need to reinitialize the
# logging infrastructure
logging.basicConfig(level=logging.INFO)
# This routine has to be run once per os level process
GabbyPlotter._plt_setup(ctx)
for idx in ctx.indexes: cls._plot_gabby_frame(ctx, idx)
@classmethod
def _plot_gabby_frame(cls, ctx, idx):
# Set up the figure and axes
fig = plt.figure(figsize=(12, 8))
fig.set_dpi(ctx.tgt.getint('dpi'))
fig.suptitle(ctx.tgt['name'], y=0.97, fontsize=25)
ax_g = fig.add_subplot(2, 1, 1)
ax_g.set_xlabel('Orbital Period (minutes)')
ax_g.set_ylabel('Orbital Altitude (km)')
ax_g.set_xlim(ctx.tgt.getint('min-orb-period'),
ctx.tgt.getint('max-orb-period'))
ax_g.set_ylim(ctx.tgt.getint('min-orb-alt'),
ctx.tgt.getint('max-orb-alt'))
ax_g.legend(ctx.legend_contents, ctx.legend_labels, loc=1)
ax_n = fig.add_subplot(2, 1, 2)
ax_n.set_xlim(ctx.data.start_d, ctx.data.end_d)
ax_n.set_ylim(0, ctx.tgt.getint('max-n-fragments'))
ax_n.set_ylabel('Number of Fragments')
ax_n.set_xlabel(ctx.tgt['copyright'])
logging.info(f" Preparing plot ({idx}/{ctx.data.N})")
# Plot the number of pieces
if ctx.fwd_prop_idx and idx >= ctx.fwd_prop_idx:
obs_idx = min(idx+1, ctx.fwd_prop_idx)
ax_n.plot(ctx.Xt[:obs_idx+1], ctx.data.Ns[:obs_idx+1],
color=ctx.perigee_color,
label='Observed')
ax_n.plot(ctx.Xt[obs_idx:idx+1], ctx.data.Ns[obs_idx:idx+1],
color=ctx.apogee_color,
label='Predicted')
props = dict(boxstyle='round',
facecolor=ctx.apogee_color,
alpha=0.6)
ax_g.text(0.75, 0.9, f"Predicted",
transform=ax_g.transAxes, bbox=props)
else:
ax_n.plot(ctx.Xt[:idx+1], ctx.data.Ns[:idx+1],
color=ctx.perigee_color,
label='Observed')
if ctx.fwd_prop_idx:
props = dict(boxstyle='round',
facecolor=ctx.perigee_color,
alpha=0.6)
ax_g.text(0.75, 0.9, f"Observed",
transform=ax_g.transAxes, bbox=props)
ax_n.plot([], [], color=ctx.apogee_color, label='Predicted')
if ctx.fwd_prop_idx: ax_n.legend(loc=1)
# Plot the comparators
for i in range(len(ctx.comp_X)):
ax_g.plot(ctx.comp_X[i], ctx.comp_Y[i], 'o',
color=ctx.comp_C[i], markersize=12)
# Plot the gabbards
ax_g.plot(ctx.data.Ts[idx], ctx.data.As[idx],
'.', color=ctx.apogee_color, markersize=6)
ax_g.plot(ctx.data.Ts[idx], ctx.data.Ps[idx],
'.', color=ctx.perigee_color, markersize=6)
# Futz with boundaries and location
fig.tight_layout(h_pad=2)
fig.subplots_adjust(top=0.9)
# Save everything
path = f"{ctx.img_dir}/%*.*d.png"%(len(str(ctx.data.N)),
len(str(ctx.data.N)),
idx)
fig.savefig(path)
logging.info(f" Figure saved to {path}")
fig.clf()
plt.close(fig)
gc.collect()
def plot(self, n_threads=1):
"""Produces the images, but not the video.
This will read the config file for all relevant information.
"""
logging.info(f"Plotting")
        if self.cache and 'gabby_plot_ctx' in self.cache:
logging.info(f" Loading context from cache")
ctx = self.cache['gabby_plot_ctx']
else:
logging.info(f" Building plot context")
ctx = GabbyPlotContext(tgt=self.tgt,
img_dir=self.img_dir,
data=self.data,
output_dir=self.output_dir)
logging.info(f" Loading data from DB")
ctx.fetch_from_db(self.db)
            if self.cache: self.cache.put('gabby_plot_ctx', ctx, [])
ctx.data.apt = None
# We create a great many individual images
logging.info(f" Creating image directory: {ctx.img_dir}")
mkdir_p(ctx.img_dir)
logging.info(f" Preparing to build {ctx.data.N} images")
if n_threads > 1:
# Interleave generation so that they're generated roughly
# in order in parallel rather than one block at a time.
# [ 0, N+0, 2N+0, ...]
# [ 1, N+1, 2N+1, ...]
# ...
# [N-1, 2N-1, 3N-1, ...]
            tmp = np.linspace(0, ctx.data.N-1, ctx.data.N, dtype=int)
indexes = [tmp[i::n_threads] for i in range(n_threads)]
# Can't stop the work...
work = []
for i in range(n_threads):
c = copy.deepcopy(ctx)
c.indexes = indexes[i]
work.append(c)
logging.info(f" Launching the pool with {n_threads} threads")
with multiprocessing.Pool(n_threads) as pool:
pool.map(GabbyPlotter._mp_gabby_plot, work)
else:
# One-time initialization per process
self._plt_setup(ctx)
for idx in range(ctx.data.N): self._plot_gabby_frame(ctx, idx)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# file:main.py
# author:Itsuka
# datetime:2021/8/26 14:33
# software: PyCharm
"""
this is function description
"""
import os
from codegen import codegen_layer, metadata, tables, project_dir, target_dir
from utils.common import new_file_or_dir
from . import record_delete_way, rsa_table_column
from .codegenerator import CodeGenerator
def controllerGenerate():
"""
Generate Controller code
:return: None
"""
    # Return early unless codegen_layer is 'default' or 'controller'
if codegen_layer not in ['default', 'controller']:
return
if not tables:
return
# create the target dir
new_file_or_dir(2, target_dir)
new_file_or_dir(2, project_dir)
controller_dir = os.path.join(project_dir, 'controller')
new_file_or_dir(2, controller_dir)
generator = CodeGenerator(metadata)
generator.controller_codegen(
delete_way=record_delete_way,
controller_dir=controller_dir,
rsa_table_column=rsa_table_column
)
return
|
current_obj(OBJ_SM, "my_state_machine")
current_obj(OBJ_MP, "EvtPool1")
current_obj(OBJ_AP, "my_object")
current_obj(OBJ_SM_AO, "Philo[2]")
current_obj(OBJ_AP, 0x20001234)
|
import logging
import json
import math
from configparser import NoOptionError
from gi.repository import Gtk, GObject
from lib.videodisplay import VideoDisplay
import lib.connection as Connection
from lib.config import Config
class VideoPreviewsController(object):
"""Displays Video-Previews and selection Buttons for them"""
def __init__(self, preview_box, win, uibuilder):
self.log = logging.getLogger('VideoPreviewsController')
self.preview_box = preview_box
self.win = win
self.sources = Config.getlist('mix', 'sources')
self.preview_players = {}
self.previews = {}
self.a_btns = {}
self.b_btns = {}
self.volume_sliders = {}
self.current_source = {'a': None, 'b': None}
try:
width = Config.getint('previews', 'width')
self.log.debug('Preview-Width configured to %u', width)
except NoOptionError:
width = 320
self.log.debug('Preview-Width selected as %u', width)
try:
height = Config.getint('previews', 'height')
self.log.debug('Preview-Height configured to %u', height)
except NoOptionError:
height = int(width * 9 / 16)
self.log.debug('Preview-Height calculated to %u', height)
# Accelerators
accelerators = Gtk.AccelGroup()
win.add_accel_group(accelerators)
group_a = None
group_b = None
# Check if there is a fixed audio source configured.
# If so, we will remove the volume sliders entirely
# instead of setting them up.
volume_control = \
Config.getboolean('audio', 'volumecontrol', fallback=True) or \
Config.getboolean('audio', 'forcevolumecontrol', fallback=False)
for idx, source in enumerate(self.sources):
self.log.info('Initializing Video Preview %s', source)
preview = uibuilder.get_check_widget('widget_preview', clone=True)
video = uibuilder.find_widget_recursive(preview, 'video')
video.set_size_request(width, height)
preview_box.pack_start(preview, fill=False,
expand=False, padding=0)
player = VideoDisplay(video, port=13000 + idx,
width=width, height=height)
uibuilder.find_widget_recursive(preview, 'label').set_label(source)
btn_a = uibuilder.find_widget_recursive(preview, 'btn_a')
btn_b = uibuilder.find_widget_recursive(preview, 'btn_b')
btn_a.set_name("%c %u" % ('a', idx))
btn_b.set_name("%c %u" % ('b', idx))
if not group_a:
group_a = btn_a
else:
btn_a.join_group(group_a)
if not group_b:
group_b = btn_b
else:
btn_b.join_group(group_b)
btn_a.connect('toggled', self.btn_toggled)
btn_b.connect('toggled', self.btn_toggled)
key, mod = Gtk.accelerator_parse('%u' % (idx + 1))
btn_a.add_accelerator('activate', accelerators,
key, mod, Gtk.AccelFlags.VISIBLE)
tooltip = Gtk.accelerator_get_label(key, mod)
btn_a.set_tooltip_text(tooltip)
key, mod = Gtk.accelerator_parse('<Ctrl>%u' % (idx + 1))
btn_b.add_accelerator('activate', accelerators,
key, mod, Gtk.AccelFlags.VISIBLE)
tooltip = Gtk.accelerator_get_label(key, mod)
btn_b.set_tooltip_text(tooltip)
volume_slider = uibuilder.find_widget_recursive(preview,
'audio_level')
if not volume_control:
box = uibuilder.find_widget_recursive(preview, 'box')
box.remove(volume_slider)
else:
volume_slider.set_name("volume {}".format(source))
volume_signal = volume_slider.connect('value-changed',
self.slider_changed)
def slider_format(scale, value):
if value == -20.0:
return "-\u221e\u202fdB"
else:
return "{:.{}f}\u202fdB".format(value,
scale.get_digits())
volume_slider.connect('format-value', slider_format)
self.volume_sliders[source] = (volume_slider, volume_signal)
self.preview_players[source] = player
self.previews[source] = preview
self.a_btns[source] = btn_a
self.b_btns[source] = btn_b
# connect event-handler and request initial state
Connection.on('video_status', self.on_video_status)
Connection.send('get_video')
if volume_control:
Connection.on('audio_status', self.on_audio_status)
Connection.send('get_audio')
def btn_toggled(self, btn):
if not btn.get_active():
return
btn_name = btn.get_name()
self.log.debug('btn_toggled: %s', btn_name)
channel, idx = btn_name.split(' ')[:2]
source_name = self.sources[int(idx)]
if self.current_source[channel] == source_name:
self.log.info('video-channel %s already on %s',
channel, source_name)
return
self.log.info('video-channel %s changed to %s', channel, source_name)
Connection.send('set_video_' + channel, source_name)
def slider_changed(self, slider):
slider_name = slider.get_name()
source = slider_name.split(' ')[1]
value = slider.get_value()
volume = 10 ** (value / 20) if value > -20.0 else 0
self.log.debug("slider_changed: {}: {:.4f}".format(source, volume))
Connection.send('set_audio_volume {} {:.4f}'.format(source, volume))
def on_video_status(self, source_a, source_b):
self.log.info('on_video_status callback w/ sources: %s and %s',
source_a, source_b)
self.current_source['a'] = source_a
self.current_source['b'] = source_b
self.a_btns[source_a].set_active(True)
self.b_btns[source_b].set_active(True)
def on_audio_status(self, *volumes):
volumes_json = "".join(volumes)
volumes = json.loads(volumes_json)
for source, volume in volumes.items():
volume = 20.0 * math.log10(volume) if volume > 0 else -20.0
slider, signal = self.volume_sliders[source]
# Temporarily block the 'value-changed' signal,
# so we don't (re)trigger it when receiving (our) changes
GObject.signal_handler_block(slider, signal)
slider.set_value(volume)
GObject.signal_handler_unblock(slider, signal)
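# --- Illustration (editor's sketch, not part of voctogui): the dB <-> linear
# gain mapping used by slider_changed() and on_audio_status(), with -20 dB
# treated as silence. The values below are arbitrary examples.
if __name__ == "__main__":
    for db in (-20.0, -6.0, 0.0):
        gain = 10 ** (db / 20) if db > -20.0 else 0
        back = 20.0 * math.log10(gain) if gain > 0 else -20.0
        print("{:+.1f} dB -> gain {:.4f} -> {:+.1f} dB".format(db, gain, back))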
|
import joblib
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
from sklearn.tree import DecisionTreeClassifier
from sb_capstone.shaping import (
_simplify_gender,
_transform_age_group,
_transform_generation,
_explode_membership_date,
_extract_age_bins,
_transform_gender
)
select_model = joblib.load("../models/select_offer.pkl")
receive_model = joblib.load("../models/receive_offer.pkl")
def train_receive_offer(data, file):
"""Trains data to create model to determine if a customer will receive an offer.
Args:
data (pandas.DataFrame): Data to train model on.
file (str): File to save model to.
Returns:
str: File where the model is saved.
dict: Classification report.
"""
y = data.purchased
X = data.drop(columns=["purchased"])
X_train, X_test, y_train, y_test = train_test_split(X, y)
clf = DecisionTreeClassifier(criterion="gini", splitter="random")
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
    score = classification_report(y_test, y_pred, zero_division=1, output_dict=True)
joblib.dump(clf, file)
return file, score
def train_select_offer(data, file):
"""Trains data to create model to determine which offers to show to a customer.
Args:
data (pandas.DataFrame): Data to train model on.
file (str): File to save model to.
Returns:
str: File where the model is saved.
dict: Classification report.
"""
y_cols = np.arange(1, 11).astype(str).tolist()
y = data[y_cols]
X = data[data.columns[~data.columns.isin(y_cols)]]
X_train, X_test, y_train, y_test = train_test_split(X, y)
clf = MultiOutputClassifier(
DecisionTreeClassifier(criterion="gini", splitter="random"),
)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
    score = classification_report(y_test, y_pred, zero_division=1, output_dict=True)
joblib.dump(clf, file)
return file, score
def _convert_for_select(profile):
"""Convert profile to be fed into the select model.
Args:
profile (pandas.DataFrame): Profile to convert.
Returns:
pandas.DataFrame: Converted profile.
"""
without_profile = profile[profile.age.isna()].reset_index(drop=True)
profile = profile[~profile.age.isna()].reset_index(drop=True)
profile = _simplify_gender(
_explode_membership_date(profile))
return profile, without_profile
def select_offer(profile, model = select_model, default_offers = []):
"""Predict which offers to show to a customer.
Args:
profile (pandas.DataFrame): Profile to predict offers for.
model (sklearn.model_selection.Model): Model to use to predict offers.
        default_offers (list): Default offers to show to customers who are anonymous.
Returns:
pandas.DataFrame: Profile with offers.
"""
profile, without_profile = _convert_for_select(profile)
offer_cols = np.arange(1, 11).astype(str).tolist()
profile[offer_cols] = np.zeros(10, dtype=int).tolist()
if len(profile) > 0:
cols = [
"gender",
"age",
"income",
"membership_year",
"membership_month",
"membership_day"
]
y = pd.DataFrame(model.predict(profile[cols]), columns=offer_cols)
profile[offer_cols] = y
profile = profile[["id"] + offer_cols]
profile = pd.melt(profile, id_vars="id", value_vars=np.arange(1, 11).astype(str).tolist(), var_name="recommended_offers")
profile = profile[profile.value == 1]
profile = profile.groupby("id").agg({"recommended_offers": lambda x: x.tolist()}).reset_index()
without_profile["recommended_offers"] = [default_offers] * without_profile.shape[0]
without_profile = without_profile[["id", "recommended_offers"]]
results = pd.concat([profile, without_profile])
return results
def _convert_for_receive(profile):
"""Convert profile to be fed into the receive model.
Args:
profile (pandas.DataFrame): Profile to convert.
Returns:
pandas.DataFrame: Converted profile.
"""
without_profile = profile[profile.age.isna()].reset_index(drop=True)
profile = profile[~profile.age.isna()].reset_index(drop=True)
profile = _transform_age_group(
_transform_generation(
_transform_gender(
_explode_membership_date(
_extract_age_bins(
profile)))))
return profile, without_profile
def receive_offer(profile, model = receive_model, default_value=pd.NA):
"""Predict whether the customer should receive an offer.
Args:
profile (pandas.DataFrame): Profile to predict offers for.
model (sklearn.model_selection.Model): Model to use to predict offers.
default_value (str): Default value to use if the customer is anonymous.
Returns:
pandas.DataFrame: Profile with offers.
"""
profile, without_profile = _convert_for_receive(profile)
profile["receive_offer"] = False
cols = [
"gender",
"age",
"income",
"membership_year",
"membership_month",
"membership_day",
"gen_z",
"millenials",
"gen_x",
"boomers",
"silent",
"young",
"adult",
"middle_age",
"old"
]
if len(profile) > 0:
y = model.predict(profile[cols])
profile["receive_offer"] = y
    profile.receive_offer = profile.receive_offer == 1.0
profile = profile[["id", "receive_offer"]]
without_profile["receive_offer"] = default_value
without_profile = without_profile[["id", "receive_offer"]]
results = pd.concat([profile, without_profile]).sort_values("id").reset_index(drop=True)
return results
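# --- Illustration (editor's sketch, not part of the original module): a tiny
# synthetic run of train_receive_offer(). The column names, values and output
# file name below are made up, and the module-level joblib.load calls above
# must succeed for this file to be importable at all.
if __name__ == "__main__":
    demo = pd.DataFrame({
        "gender": [0, 1, 0, 1, 0, 1, 0, 1],
        "age": [25, 40, 33, 61, 52, 29, 47, 38],
        "income": [40_000, 60_000, 55_000, 80_000, 72_000, 48_000, 65_000, 58_000],
        "purchased": [0, 1, 0, 1, 1, 0, 1, 0],
    })
    model_file, report = train_receive_offer(demo, "receive_offer_demo.pkl")
    print(model_file)
    print(report)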
|
import csv
from collections import defaultdict
import StringIO
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from jinja_template import JinjaTemplating
from google.appengine.ext import db
from google.appengine.api import memcache
from xml.dom import minidom
from google_spreadsheet import api
from google_spreadsheet.api import SpreadsheetAPI
from models.questions import Questions
from models.track_questions_details import QuestionsDetails
import json
import re
from models.pegasus_model import BbgDemoModel
from models.dynamic_model_properties import DynamicModelsProperties
from models.list_of_surveys import ListOfSurveys
class QuestionsDetailsFromGoogleSheet(JinjaTemplating):
def get(self):
JinjaTemplating.render_template_only(self, 'aliu_test.html')
def post(self):
posted_json = json.loads(self.request.body)
# posted_json = self.request.body
form_id = posted_json['form_id']
survey_meta_details = posted_json['settings']['column_names']
survey_details = posted_json['settings']['elements']
# self.response.out.write(survey_name + '\n')
# return
survey_xls_columns = posted_json['survey']['column_names']
survey_questions_in_xls = posted_json['survey']['elements']
choices_xls_columns = posted_json['choices']['column_names']
possible_choices_xls = posted_json['choices']['elements']
survey_name = survey_details[0]['form_title']
# retrieve_user_surveys = db.Query(ListOfSurveys)
retrieve_user_surveys = ListOfSurveys.all()
retrieve_user_surveys.filter('survey_name', survey_name)
retrieve_user_surveys.filter("survey_aggregate_form_id =", form_id)
if_survey_exist = retrieve_user_surveys.get()
# self.response.out.write("Checking to see list of surveys \n")
# self.response.out.write(if_survey_exist)
# return
if if_survey_exist is None:
            # these are used to get the various columns in the surveys worksheet as array indexes
type_of_data = 0
name_of_db_field = 1
question_text = 2
            # these are used to get the various columns in the choices worksheet as array indexes
option_list_name = 0
option_value = 1
option_label = 2
            # These accumulate the possible answer values and labels saved with each question's details
possible_answers_values = ''
possible_answers_labels = ''
for question in survey_questions_in_xls:
question_data_type = question[survey_xls_columns[type_of_data]]
question_db_field_name = question[survey_xls_columns[name_of_db_field]]
question_text_to_display= question[survey_xls_columns[question_text]]
                # searching to see whether the word 'select' exists in the question_data_type
search_to_see_select = re.search( r'select', question_data_type)
if search_to_see_select:
# setting a database field
# setattr(self, question_db_field_name, db.StringProperty())
question_type_split = question_data_type.split()
get_list_name_in_survey = question_type_split[1]
# self.response.out.write('We found the word "select" in: ' + question_data_type + '\n')
# self.response.out.write(question_data_type + '\n')
# self.response.out.write(question_db_field_name + '\n')
# self.response.out.write(question_text_to_display + '\n')
for answer_option in possible_choices_xls:
answers_for_a_question_listname = answer_option[choices_xls_columns[option_list_name]]
answer_value = answer_option[choices_xls_columns[option_value]]
answer_text_to_display = answer_option[choices_xls_columns[option_label]]
                        # comparing the list name in the choices worksheet to the second token of the split data type, which gives the list name
if answers_for_a_question_listname == get_list_name_in_survey:
possible_answers_values += answer_value + ','
possible_answers_labels += answer_text_to_display + ','
# self.response.out.write('Possible Answers Values: ' + possible_answers_values + '\n')
# self.response.out.write('Possible Answers Labels: ' + possible_answers_labels + '\n')
# GqlQuery interface constructs a query using a GQL query string
# finding_if_question_has_been_saved = db.GqlQuery("SELECT * FROM QuestionsDetails " +
# "WHERE survey_name = :1 AND question_db_field_name <= :2 ", survey_name,question_db_field_name)
finding_if_question_has_been_saved = QuestionsDetails.all().filter('survey_name =', survey_name).filter('question_db_field_name =', question_db_field_name)
# finding_if_question_has_been_saved = db.Query(QuestionsDetails)
# getting the first match element of the query property model
finding_if_question_has_been_saved = finding_if_question_has_been_saved.get()
# for result in finding_if_question_has_been_saved:
# self.response.out.write(result)
# return
# if not v.get():
                    # if the question has not been saved yet, save its details
if finding_if_question_has_been_saved is None:
# saving the question details into the question details table
question_type = 'close_ended'
insert_a_new_question_details = Questions(survey_name = survey_name, question = question_text_to_display, question_field = question_db_field_name, possible_answers = possible_answers_values, possible_answers_labels = possible_answers_labels, question_type = question_type)
insert_a_new_question_details.put()
# save the newly inserted question into tracking question details table so we know which questions details have been saved
insert_a_new_tracking_question_details = QuestionsDetails(survey_name = survey_name, question_db_field_name = question_db_field_name)
insert_a_new_tracking_question_details.put()
# resetting the variables
possible_answers_values = ''
possible_answers_labels = ''
else:
print("Question Already Saved")
# if question has been saved
self.response.out.write('Field Already saved in db')
else:
finding_if_question_has_been_saved = QuestionsDetails.all().filter('survey_name =', survey_name).filter('question_db_field_name =', question_db_field_name)
# finding_if_question_has_been_saved = db.Query(QuestionsDetails)
# getting the first match element of the query property model
finding_if_question_has_been_saved = finding_if_question_has_been_saved.get()
# if question has not been saved save it
if finding_if_question_has_been_saved is None:
                # if the question details have not been saved already and it is open ended
# Saving question details into the questions details table
question_type = 'open_ended'
# insert_a_new_question_deatils = Questions(question = question_text_to_display, question_field = question_db_field_name, possible_answers = possible_answers_values, possible_answers_labels = possible_answers_labels, question_type = question_type)
insert_a_new_question_details = Questions(survey_name = survey_name, question = question_text_to_display, question_field = question_db_field_name, possible_answers = 'No possible answers',question_type = question_type)
insert_a_new_question_details.put()
# save the newly inserted question into tracking question details table so we know which questions details have been saved
insert_a_new_tracking_question_details = QuestionsDetails(survey_name = survey_name, question_db_field_name = question_db_field_name)
insert_a_new_tracking_question_details.put()
else:
print("Question Already Saved")
# if question has been saved
self.response.out.write('Field Already saved in db')
insert_a_new_survey = ListOfSurveys(survey_name = survey_name, survey_aggregate_form_id = form_id)
insert_a_new_survey.put()
survey_builder_successful = insert_a_new_survey.key().id()
data = {}
if survey_builder_successful is None:
            # if saving the survey details failed
            build_status = 'failed'
            message = 'Build process interrupted'
data = {'build_status': build_status, 'message': message }
data_returned_to_front_end = json.dumps(data)
self.response.out.write(data_returned_to_front_end)
return
else:
# if survey details saved
build_status = 'success'
            message = 'Build successful'
data = {'build_status': build_status, 'message': message }
data_returned_to_front_end = json.dumps(data)
self.response.out.write(data_returned_to_front_end)
return
else:
            # if the survey was already built on this server instance
            build_status = 'failed'
            message = 'Survey already exists on this server instance. Hint: change the survey form id'
data = {'build_status': build_status, 'message': message }
data_returned_to_front_end = json.dumps(data)
self.response.out.write(data_returned_to_front_end)
return
|
local_variable = 42
print(lo<caret>)
|
from lram_reader import get_lram_location_in_frame_data
import re
def set_lram_value_in_bit_file(lutrams, lram_x, lram_y, lram_l, lram_width, lram_value, bit_file, start_byte, start_word_index=0):
word_index, bit_index = get_lram_location_in_frame_data(lutrams, lram_x, lram_y, lram_l, start_word_index)
# Loop on the bits of the lram
for i in range(int(len(word_index)/lram_width)):
for j in range(lram_width):
# Calculate the word offset inside the file in bytes (skipping the header bytes)
word_offset = (start_byte) + (word_index[i * lram_width + j] * 4)
# Jump to this word and read it
bit_file.seek(word_offset)
word = bytearray(bit_file.read(4))
# Get the byte we need to modify
byte_offset = (3 - (bit_index[i * lram_width + j] >> 3))
byte = word[byte_offset]
# Bit manipulate that byte
bit_value = (lram_value[i] >> j) & 0x1
if bit_value == 0:
byte = byte & ~(1 << (bit_index[i * lram_width + j] % 8))
else:
byte = byte | (1 << (bit_index[i * lram_width + j] % 8))
word[byte_offset] = byte
# Overwrite the word after the modification
bit_file.seek(word_offset)
bit_file.write(bytes(word))
def set_named_lram_value_in_bit_file(lutrams, rams, lram_name, lram_value, bit_file, start_byte, start_word_index=0):
# Get info about the lram from its name
lram_name_stripped = lram_name[:lram_name.rfind('/')]
ram_info = rams[lram_name_stripped]
lram_type = ram_info.ram_type
xy = ram_info.ram_xy
lram_bel = ram_info.ram_bel
# Get the location of this lram in the partial bitstream file
x = int(re.split("Y", xy.lstrip('X'), 0)[0])
y = int(re.split("Y", xy.lstrip('X'), 0)[1])
# Check which LUT6 in the 8 LUTs of this lram should be updated
if lram_type == 'RAM64M8' or lram_type == 'RAM64M' or lram_type == 'RAM32M16':
lut = lram_name[-1]
elif lram_type == 'RAM32M':
if lram_bel[0] == 'H':
lut = chr(ord(lram_name[-1]) + 4)
else:
lut = lram_name[-1]
else:
lut = lram_bel[0]
l = lut.upper()
word_index, bit_index = get_lram_location_in_frame_data(lutrams, x, y, l, start_word_index)
# Check which LUT5 (the lower or the upper) of this LUT6 should be updated
if lram_type == 'SRL16E' or lram_type == 'RAM32X1S':
if lram_bel[1] == '5':
word_index = word_index[0:32]
bit_index = bit_index[0:32]
elif lram_bel[1] == '6':
word_index = word_index[32:]
bit_index = bit_index[32:]
    # The value of a RAM32M is constructed from one bit of the lower 32 bits, then one bit of the upper 32 bits, and so on
if lram_type == 'RAM32M16' or lram_type == 'RAM32M':
lram_value_bin = "{:064b}".format(lram_value, 'b')
lut_value_bin = lram_value_bin[-64] + lram_value_bin[-62] + lram_value_bin[-60] + lram_value_bin[-58] + lram_value_bin[-56] + lram_value_bin[-54] + lram_value_bin[-52] + lram_value_bin[-50] + lram_value_bin[-48] + lram_value_bin[-46]+ lram_value_bin[-44] + lram_value_bin[-42] + lram_value_bin[-40] + lram_value_bin[-38] + lram_value_bin[-36] + lram_value_bin[-34] + lram_value_bin[-32] + lram_value_bin[-30] + lram_value_bin[-28] + lram_value_bin[-26] + lram_value_bin[-24] + lram_value_bin[-22] + lram_value_bin[-20] + lram_value_bin[-18] + lram_value_bin[-16] + lram_value_bin[-14]+ lram_value_bin[-12] + lram_value_bin[-10] + lram_value_bin[-8] + lram_value_bin[-6] + lram_value_bin[-4] + lram_value_bin[-2] + lram_value_bin[-63] + lram_value_bin[-61] + lram_value_bin[-59] + lram_value_bin[-57] + lram_value_bin[-55] + lram_value_bin[-53] + lram_value_bin[-51] + lram_value_bin[-49] + lram_value_bin[-47] + lram_value_bin[-45]+ lram_value_bin[-43] + lram_value_bin[-41] + lram_value_bin[-39] + lram_value_bin[-37] + lram_value_bin[-35] + lram_value_bin[-33] + lram_value_bin[-31] + lram_value_bin[-29] + lram_value_bin[-27] + lram_value_bin[-25] + lram_value_bin[-23] + lram_value_bin[-21] + lram_value_bin[-19] + lram_value_bin[-17] + lram_value_bin[-15] + lram_value_bin[-13]+ lram_value_bin[-11] + lram_value_bin[-9] + lram_value_bin[-7] + lram_value_bin[-5] + lram_value_bin[-3] + lram_value_bin[-1]
lut_value = int(lut_value_bin, 2)
# The value of an SRL16E is constructed from the odd bits of LUT5 32 bits
elif lram_type == 'SRL16E':
lram_value_bin = "{:016b}".format(lram_value, 'b')
lut_value_bin = lram_value_bin[-16] + '0' + lram_value_bin[-15] + '0' + lram_value_bin[-14] + '0' + lram_value_bin[-13] + '0' + lram_value_bin[-12] + '0' + lram_value_bin[-11] + '0' + lram_value_bin[-10] + '0' + lram_value_bin[-9] + '0' + lram_value_bin[-8] + '0' + lram_value_bin[-7] + '0' + lram_value_bin[-6] + '0' + lram_value_bin[-5] + '0' + lram_value_bin[-4] + '0' + lram_value_bin[-3] + '0' + lram_value_bin[-2] + '0' + lram_value_bin[-1] + '0'
lut_value = int(lut_value_bin, 2)
# The value of an SRLC32E is constructed from the odd bits of LUT6 64 bits
elif lram_type == 'SRLC32E':
lram_value_bin = "{:032b}".format(lram_value, 'b')
lut_value_bin = lram_value_bin[-32] + '0' + lram_value_bin[-31] + '0' + lram_value_bin[-30] + '0' + lram_value_bin[-29] + '0' + lram_value_bin[-28] + '0' + lram_value_bin[-27] + '0' + lram_value_bin[-26] + '0' + lram_value_bin[-25] + '0' + lram_value_bin[-24] + '0' + lram_value_bin[-23] + '0' + lram_value_bin[-22] + '0' + lram_value_bin[-21] + '0' + lram_value_bin[-20] + '0' + lram_value_bin[-19] + '0' + lram_value_bin[-18] + '0' + lram_value_bin[-17] + '0' + lram_value_bin[-16] + '0' + lram_value_bin[-15] + '0' + lram_value_bin[-14] + '0' + lram_value_bin[-13] + '0' + lram_value_bin[-12] + '0' + lram_value_bin[-11] + '0' + lram_value_bin[-10] + '0' + lram_value_bin[-9] + '0' + lram_value_bin[-8] + '0' + lram_value_bin[-7] + '0' + lram_value_bin[-6] + '0' + lram_value_bin[-5] + '0' + lram_value_bin[-4] + '0' + lram_value_bin[-3] + '0' + lram_value_bin[-2] + '0' + lram_value_bin[-1] + '0'
lut_value = int(lut_value_bin, 2)
else:
lut_value = lram_value
# Loop on the bits of the lut
for i in range(len(word_index)):
# Calculate the word offset inside the file in bytes (skipping the header bytes)
word_offset = (start_byte) + (word_index[i] * 4)
# Jump to this word and read it
bit_file.seek(word_offset)
word = bytearray(bit_file.read(4))
# Get the byte we need to modify
byte_offset = (3 - (bit_index[i] >> 3))
byte = word[byte_offset]
# Bit manipulate that byte
bit_value = (lut_value >> i) & 0x1
if bit_value == 0:
byte = byte & ~(1 << (bit_index[i] % 8))
else:
byte = byte | (1 << (bit_index[i] % 8))
word[byte_offset] = byte
# Overwrite the word after the modification
bit_file.seek(word_offset)
bit_file.write(bytes(word))
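# --- Illustration (editor's sketch, not part of the original module): the
# byte addressing and bit masking applied above to each 4-byte word, isolated
# on a dummy word. bit_index is an arbitrary example value.
if __name__ == "__main__":
    word = bytearray(b"\x00\x00\x00\x00")
    bit_index = 13
    byte_offset = 3 - (bit_index >> 3)            # same addressing as above
    word[byte_offset] |= 1 << (bit_index % 8)     # set the bit
    print(word.hex())                             # -> '00002000'
    word[byte_offset] &= 0xFF & ~(1 << (bit_index % 8))  # clear it again
    print(word.hex())                             # -> '00000000'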
|
import pandas as pd
df = pd.read_csv("extracted_vim.txt")
df = df.drop_duplicates()
print(df)
df.to_csv(r'pandas_dropped_duplicates.txt', header=None, index=None, sep=' ', mode='a')
|
import numpy as np
import torch
import torch.nn.functional as F
import json
import os
import copy
from sklearn.metrics import average_precision_score, roc_auc_score
from .. import builder
from .. import gloria
from pytorch_lightning.core import LightningModule
class ClassificationModel(LightningModule):
"""Pytorch-Lightning Module"""
def __init__(self, cfg):
"""Pass in hyperparameters to the model"""
        # initialize superclass
super().__init__()
self.cfg = cfg
if self.cfg.model.vision.model_name in gloria.available_models():
self.model = gloria.load_img_classification_model(
self.cfg.model.vision.model_name,
num_cls=self.cfg.model.vision.num_targets,
freeze_encoder=self.cfg.model.vision.freeze_cnn,
)
else:
self.model = builder.build_img_model(cfg)
self.loss = builder.build_loss(cfg)
self.lr = cfg.lightning.trainer.lr
self.dm = None
def configure_optimizers(self):
optimizer = builder.build_optimizer(self.cfg, self.lr, self.model)
scheduler = builder.build_scheduler(self.cfg, optimizer, self.dm)
return {"optimizer": optimizer, "lr_scheduler": scheduler}
def training_step(self, batch, batch_idx):
return self.shared_step(batch, "train")
def validation_step(self, batch, batch_idx):
return self.shared_step(batch, "val")
def test_step(self, batch, batch_idx):
return self.shared_step(batch, "test")
def training_epoch_end(self, training_step_outputs):
return self.shared_epoch_end(training_step_outputs, "train")
def validation_epoch_end(self, validation_step_outputs):
return self.shared_epoch_end(validation_step_outputs, "val")
def test_epoch_end(self, test_step_outputs):
return self.shared_epoch_end(test_step_outputs, "test")
def shared_step(self, batch, split):
"""Similar to traning step"""
x, y = batch
logit = self.model(x)
loss = self.loss(logit, y)
log_iter_loss = True if split == "train" else False
self.log(
f"{split}_loss",
loss,
on_epoch=True,
on_step=log_iter_loss,
logger=True,
prog_bar=True,
)
return_dict = {"loss": loss, "logit": logit, "y": y}
return return_dict
def shared_epoch_end(self, step_outputs, split):
logit = torch.cat([x["logit"] for x in step_outputs])
y = torch.cat([x["y"] for x in step_outputs])
prob = torch.sigmoid(logit)
y = y.detach().cpu().numpy()
prob = prob.detach().cpu().numpy()
auroc_list, auprc_list = [], []
for i in range(y.shape[1]):
y_cls = y[:, i]
prob_cls = prob[:, i]
if np.isnan(prob_cls).any():
auprc_list.append(0)
auroc_list.append(0)
else:
auprc_list.append(average_precision_score(y_cls, prob_cls))
auroc_list.append(roc_auc_score(y_cls, prob_cls))
auprc = np.mean(auprc_list)
auroc = np.mean(auroc_list)
self.log(f"{split}_auroc", auroc, on_epoch=True, logger=True, prog_bar=True)
self.log(f"{split}_auprc", auprc, on_epoch=True, logger=True, prog_bar=True)
if split == "test":
results_csv = os.path.join(self.cfg.output_dir, "results.csv")
results = {"auorc": auroc, "auprc": auprc}
with open(results_csv, "w") as fp:
json.dump(results, fp)
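# --- Illustration (editor's sketch, not part of the original module): the
# per-class AUROC/AUPRC loop from shared_epoch_end(), run on random logits
# and labels. Shapes and the seed are arbitrary; note this module's relative
# imports mean it is normally imported as part of its package, not run directly.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    logit = torch.from_numpy(rng.normal(size=(32, 3)))
    y = rng.integers(0, 2, size=(32, 3))
    prob = torch.sigmoid(logit).numpy()
    auroc = [roc_auc_score(y[:, i], prob[:, i]) for i in range(y.shape[1])]
    auprc = [average_precision_score(y[:, i], prob[:, i]) for i in range(y.shape[1])]
    print("mean AUROC {:.3f}, mean AUPRC {:.3f}".format(np.mean(auroc), np.mean(auprc)))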
|
import os.path as osp
import pytest
import pandas as pd
from sqlalchemy import create_engine
from pagai.services.database_explorer import get_sql_url, table_exists
from tests.settings import DATABASES
def get_test_data_path(filename: str) -> str:
this_directory = osp.dirname(osp.realpath(__file__))
return osp.join(this_directory, "data", filename)
@pytest.fixture(scope="session", params=list(DATABASES.keys()))
def db_config(request):
db_driver = request.param
db_config = DATABASES[db_driver]
sql_engine = create_engine(get_sql_url(db_driver, db_config))
# Load (or reload) test data into db.
load_table(sql_engine, "patients", "patients.csv")
load_table(sql_engine, "UPPERCASE", "patients-uppercase.csv")
load_table(sql_engine, "CaseSensitive", "patients-case-sensitive.csv")
db_config["model"] = db_driver
return db_config
def load_table(sql_engine, table_name, data_file):
data = pd.read_csv(
get_test_data_path(data_file), sep=",", encoding="utf-8", parse_dates=["date"]
)
# Use custom check if table exists instead of pandas feature
# df.to_sql(if_exists='replace') because it uses reflection on Oracle and
# it's very slow.
exists, table = table_exists(sql_engine, table_name)
if exists:
table.drop(sql_engine)
print("dropped existing test table:", table_name)
data.to_sql(name=table_name, con=sql_engine)
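# --- Illustration (editor's sketch, not part of the test suite): the same
# load-then-read pattern against an in-memory SQLite engine, independent of
# the DATABASES settings above. Table and column names are illustrative only.
if __name__ == "__main__":
    engine = create_engine("sqlite://")
    demo = pd.DataFrame({"id": [1, 2],
                         "date": pd.to_datetime(["2020-01-01", "2020-02-01"])})
    demo.to_sql(name="patients_demo", con=engine, index=False)
    print(pd.read_sql("SELECT * FROM patients_demo", engine))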
|
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Place)
admin.site.register(Cheese)
|
from __future__ import absolute_import
import argparse
import logging
import re
from past.builtins import unicode
import apache_beam as beam
from apache_beam import window
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
def run(argv=None):
"""Main entry point; defines and runs the wordcount pipeline."""
pubsubTopicName = "projects/data-qe-da7e1252/topics/sk-firewall-json"
parser = argparse.ArgumentParser()
parser.add_argument('--input',
dest='input',
default='gs://dataflow-samples/shakespeare/kinglear.txt',
help='Input file to process.')
parser.add_argument('--output',
dest='output',
# CHANGE 1/5: The Google Cloud Storage path is required
# for outputting the results.
#default='gs://YOUR_OUTPUT_BUCKET/AND_OUTPUT_PREFIX',
#default="/Users/skanabargi/python/stream/output",
default='gs://data-qe-da7e1252/tmp/sk_out',
help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_args.extend([
# CHANGE 2/5: (OPTIONAL) Change this to DataflowRunner to
# run your pipeline on the Google Cloud Dataflow Service.
'--runner=DataflowRunner',
# CHANGE 3/5: Your project ID is required in order to run your pipeline on
# the Google Cloud Dataflow Service.
'--project=data-qe-da7e1252',
# CHANGE 4/5: Your Google Cloud Storage path is required for staging local
# files.
#'--staging_location=gs://YOUR_BUCKET_NAME/AND_STAGING_DIRECTORY',
'--staging_location=gs://data-qe-da7e1252/tmp/stage/',
# CHANGE 5/5: Your Google Cloud Storage path is required for temporary
# files.
#'--temp_location=gs://YOUR_BUCKET_NAME/AND_TEMP_DIRECTORY',
'--temp_location=gs://data-qe-da7e1252/tmp/local',
'--experiments=allow_non_updatable_job',
'--job_name=sk-pubsub-to-gcs-5',
])
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
pipeline_options.view_as(StandardOptions).streaming = True
with beam.Pipeline(options=pipeline_options) as p:
# Read the text file[pattern] into a PCollection.
#lines = p | ReadFromText(known_args.input)
lines = p | beam.io.ReadFromPubSub(topic=pubsubTopicName)
# Count the occurrences of each word.
output = ( lines | 'window' >> beam.WindowInto(window.FixedWindows(60)))
# Write the output using a "Write" transform that has side effects.
# pylint: disable=expression-not-assigned
output | 'writeTOGcs' >> WriteToText(known_args.output)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
|
#!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django.apps import apps
from django.apps import AppConfig
from django.conf import settings
from django.core.checks import register
from django.core.checks import Error
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import module_has_submodule
from django.utils.module_loading import import_module
from djangobmf.conf import settings as bmfsettings
import logging
logger = logging.getLogger(__name__)
class BMFConfig(AppConfig):
name = 'djangobmf'
label = bmfsettings.APP_LABEL
verbose_name = "Django BMF"
def ready(self):
self.bmf_modules = []
from djangobmf.core.site import Site
self.site = Site(namespace=self.label, app_name=self.label)
class ModuleTemplate(AppConfig):
bmf_label = bmfsettings.APP_LABEL
def ready(self):
# if ready was already called
if hasattr(self, 'bmf_config'): # pragma: no cover
return True
self.bmf_config = apps.get_app_config(self.bmf_label)
if not hasattr(self.bmf_config, 'site'): # pragma: no cover
raise ImproperlyConfigured(
"Can not find a site attribute in %(cls)s. "
"Please import the BMF-Framework before you "
"import any BMF-Modules in your INSTALLED_APPS." % {
'cls': self.bmf_config.__class__.__name__
}
)
# autodiscover bmf modules ============================================
if module_has_submodule(self.module, "bmf_module"): # pragma: no branch
# load instructions of bmf_module.py
import_module('%s.%s' % (self.name, "bmf_module"))
# see if model needs a number_cycle
for model in [m for m in self.models.values() if hasattr(m, '_bmfmeta') and m._bmfmeta.number_cycle]:
self.bmf_config.site.register_numbercycle(model)
logger.debug('App "%s" (%s) is ready' % (
self.verbose_name,
self.label,
))
class ContribTemplate(ModuleTemplate):
verbose_name = "Django BMF Contrib"
class CurrencyTemplate(ModuleTemplate):
verbose_name = "Django BMF Currency"
class ReportTemplate(ModuleTemplate):
verbose_name = "Django BMF Report"
# Checks
@register()
def checks(app_configs, **kwargs): # noqa
errors = []
if not apps.is_installed('django.contrib.admin'): # pragma: no cover
errors.append(Error(
'django.contrib.admin not found',
hint="Put 'django.contrib.admin' in your INSTALLED_APPS setting",
id='djangobmf.E001',
))
if not apps.is_installed('django.contrib.contenttypes'): # pragma: no cover
errors.append(Error(
'django.contrib.contenttypes not found',
hint="Put 'django.contrib.contenttypes' in your INSTALLED_APPS setting",
id='djangobmf.E002',
))
if 'django.contrib.auth.context_processors.auth' not in settings.TEMPLATE_CONTEXT_PROCESSORS: # pragma: no cover
errors.append(Error(
'django.contrib.auth.context_processors not found',
hint="Put 'django.contrib.auth.context_processors' in your TEMPLATE_CONTEXT_PROCESSORS setting",
id='djangobmf.E003',
))
return errors
|
# coding: utf-8
# Setup Raspberry Pi
#
# Script used on a freshly installed Raspbian install on a Raspberry Pi.
# Installs IPython Notebook
# Gets .ssh folder from local server
# Copy folders from local server.
# Home, Github code, sellcoffee,
#
# .gimp-2.8
# .ipython
#
#
# Install Python script requirements
# (pip arrow/praw/requests etc).
# Install from local server
#
# Get the public ssh key from the local web server that allows a remote machine to ssh into the new pi and copy the required folders.
#
# Copy the interfaces file across - and assign a
# new number. Also create a new colour hostname.
#
# How hostnames work:
# Dict that has all current hostnames with their static ip.
# Dict that has all future hostnames with their static ip.
#
# When a new machine is added, remove its hostname/static ip from future hostnames and add it to current hostnames.
#
# 32 - zinkwhite
# 33 - corlsbudcayon
# 34 - naplesyellow
# 35 - etc..
#
# Need to get a colour generator script.
#
#
# In[ ]:
# In[394]:
#ssh into the pi and start doing commands.
# In[446]:
import shutil
import colourlovers
import os
from colourlovers import ColourLovers
import requests
import json
# In[ ]:
# In[396]:
#from colourlovers import ColourLovers
#cl = ColourLovers()
#cl.color('#37cbff')
#c = ColourLovers
# In[397]:
cl = ColourLovers()
#Better to go with the random. Need to generate
#liscolor = cl.colors('top', numResults=50, result_offset=50)
liscolor = cl.colors('random')
# In[398]:
hostnamecolor = list()
# In[399]:
#From the examples - get the hex num. fill image with color. save.
#from PIL import Image
#im = Image.open("/home/wcmckee/test.jpg")
# In[400]:
#os.chdir('/home/wcmckee/colortest/')
# In[401]:
#Image.new('RGB', (100,100), color=lisc.hex)#
# In[402]:
colordict = dict()
# In[403]:
urld = dict()
# In[404]:
for lisc in liscolor:
    print(lisc.title)
#myimg = Image.new('RGB', (100,100), color=lisc.hex)
# myimg.save('/home/wcmckee/colortest/' + (lisc.title), format='png')
    print(lisc.hsv)
    print(lisc.description)
colordict.update({'title' : lisc.title, 'description' : lisc.description})
colordict.update({'hex' : lisc.hex})
urld.update({'url' : lisc.url})
hostnamecolor.append(lisc.title)
# In[405]:
print(colordict.values())
# In[406]:
#print colordict['url']
# In[407]:
hoslisf = []
repls = []
lowlisf = []
# In[408]:
for mec in hostnamecolor:
    # str.replace returns a new string, so append its result directly
    hoslisf.append(mec.replace(' ', '-'))
# In[409]:
for hosz in hoslisf:
lowlisf.append(hosz.lower())
    print(hosz)
# In[452]:
for hosa in lowlisf:
    print(hosa)
repls.append(hosa.replace("'", ''))
colordict.update({'ftitle' : hosa})
# In[411]:
repls
# In[463]:
os.chdir('/home/wcmckee/colortest/')
# In[464]:
res = requests.get(urld['url'], stream=True)
with open(str(colordict['title'].replace(' ', '-')) + '.png', 'wb') as outfil:
shutil.copyfileobj(res.raw, outfil)
del res
# In[465]:
lowlisf
# In[414]:
colordict.update({'locurl' : ('/home/wcmckee/colortest/' + colordict['title'].replace(' ', '-')) + '.png'})
# In[415]:
colordict
# In[416]:
#from IPython.display import Image
#Image(filename= colordict['locurl'])
# In[417]:
sthex = colordict['hex'].replace('#', '')
# In[418]:
sthex
# In[419]:
intens = []
absens = []
midtens = []
# In[420]:
for schr in sthex:
    print(schr, schr.isalpha())
#if schr ==
if schr.isalpha():
intens.append(schr)
# In[421]:
intens
# In[422]:
for schr in sthex:
    print(schr, schr.isdigit())
if schr.isdigit():
absens.append(schr)
# In[423]:
for rabse in absens:
    print(rabse)
    if int(rabse) >= 5:
        print('mid')
midtens.append(rabse)
# In[458]:
lownumza = []
# In[459]:
for rabse in absens:
    print(rabse)
    if int(rabse) <= 5:
        print('low')
lownumza.append(rabse)
# In[424]:
print(absens)
# In[425]:
for mit in midtens:
    print(mit)
# In[426]:
#absenc = set(absens) - set(midtens)
# In[427]:
#absenc
# In[428]:
#midlis = list(absenc)
# In[429]:
leabs = len(lownumza)
leints = len(intens)
lemid = len(midtens)
# In[439]:
#leabs
colordict.update({'absens-num' : leabs, 'midsens-num' : lemid})
# In[460]:
leabs
# In[461]:
lemid
# In[462]:
leints
# In[441]:
colordict.update({'intense-num' : leints})
# In[442]:
midtens
# In[443]:
if leabs < leints:
    print('intense')
    print(leints)
colordict.update({'generalfeel' : 'intense'})
# In[ ]:
# In[444]:
if leabs > leints:
    print('absense')
    print(leabs)
colordict.update({'generalfeel' : 'absense'})
# In[453]:
colordict
# In[454]:
jsnfil = json.dumps(colordict)
# In[455]:
jsnfil
# In[457]:
opjsf = open('/home/wcmckee/colortest/' + colordict['ftitle'] + '.json', 'w')
opjsf.write(jsnfil)
opjsf.close()
# In[471]:
#prehost = '192.168.1.'
#lisofip = []
# In[ ]:
#These are all the ip addresses i want to dish out.
#address is in a file and when assigned to a new
#hostmachine, the address is removed.
#Lets do it now.
# In[494]:
#opipfza = open('/home/wcmckee/colortest/freeip.txt', 'w')
#opipfza.write(str(lisofip))
# In[ ]:
# In[495]:
#for lispo in lisofip:
# print lispo
# opipfza.write(str(lispo))
# opipfza.write('\n')
# In[496]:
#opipfza.close()
# In[497]:
#for hosips in range(130,180):
# print prehost + str(hosips)
# lisofip.append(prehost + str(hosips))
# In[ ]:
# lookup hex code and tell me what color it is
# In[4]:
#Fetches home dir from local server backup.
#This is the backup pi.
#os.system('rsync -azP wcmckee@192.168.1.8:/home/ /home/')
# In[ ]:
#os.chdir('/home/wcmckee/ipython/')
# In[ ]:
#os.system('sudo python setup.py install')
|
import logging
import argparse
import os
import sys
from matplotlib import pyplot as plt
import torch
import torch.nn as nn
from pytorchBaselines.a2c_ppo_acktr.envs import make_vec_envs
from pytorchBaselines.evaluation import evaluate
from crowd_sim import *
from pytorchBaselines.a2c_ppo_acktr.model import Policy
def main():
# the following parameters will be determined for each test run
parser = argparse.ArgumentParser('Parse configuration file')
# the model directory that we are testing
parser.add_argument('--model_dir', type=str, default='data/example_model')
parser.add_argument('--visualize', default=False, action='store_true')
# if -1, it will run 500 different cases; if >=0, it will run the specified test case repeatedly
parser.add_argument('--test_case', type=int, default=-1)
# model weight file you want to test
parser.add_argument('--test_model', type=str, default='27776.pt')
test_args = parser.parse_args()
from importlib import import_module
model_dir_temp = test_args.model_dir
if model_dir_temp.endswith('/'):
model_dir_temp = model_dir_temp[:-1]
# import config class from saved directory
# if not found, import from the default directory
try:
model_dir_string = model_dir_temp.replace('/', '.') + '.configs.config'
model_arguments = import_module(model_dir_string)
Config = getattr(model_arguments, 'Config')
    except Exception:
print('Failed to get Config function from ', test_args.model_dir, '/config.py')
from crowd_nav.configs.config import Config
config = Config()
# configure logging and device
# print test result in log file
log_file = os.path.join(test_args.model_dir,'test')
if not os.path.exists(log_file):
os.mkdir(log_file)
if test_args.visualize:
log_file = os.path.join(test_args.model_dir, 'test', 'test_visual.log')
else:
log_file = os.path.join(test_args.model_dir, 'test', 'test_'+test_args.test_model+'.log')
file_handler = logging.FileHandler(log_file, mode='w')
stdout_handler = logging.StreamHandler(sys.stdout)
level = logging.INFO
logging.basicConfig(level=level, handlers=[stdout_handler, file_handler],
format='%(asctime)s, %(levelname)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
logging.info('robot FOV %f', config.robot.FOV)
logging.info('humans FOV %f', config.humans.FOV)
torch.manual_seed(config.env.seed)
torch.cuda.manual_seed_all(config.env.seed)
if config.training.cuda:
if config.training.cuda_deterministic:
# reproducible but slower
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
else:
# not reproducible but faster
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
torch.set_num_threads(1)
device = torch.device("cuda" if config.training.cuda else "cpu")
logging.info('Create other envs with new settings')
if test_args.visualize:
fig, ax = plt.subplots(figsize=(7, 7))
ax.set_xlim(-6, 6)
ax.set_ylim(-6, 6)
ax.set_xlabel('x(m)', fontsize=16)
ax.set_ylabel('y(m)', fontsize=16)
plt.ion()
plt.show()
else:
ax = None
load_path=os.path.join(test_args.model_dir,'checkpoints', test_args.test_model)
print(load_path)
env_name = config.env.env_name
recurrent_cell = 'GRU'
eval_dir = os.path.join(test_args.model_dir,'eval')
if not os.path.exists(eval_dir):
os.mkdir(eval_dir)
envs = make_vec_envs(env_name, config.env.seed, 1,
config.reward.gamma, eval_dir, device, allow_early_resets=True,
config=config, ax=ax, test_case=test_args.test_case)
actor_critic = Policy(
envs.observation_space.spaces, # pass the Dict into policy to parse
envs.action_space,
base_kwargs=config,
base=config.robot.policy)
actor_critic.load_state_dict(torch.load(load_path, map_location=device))
actor_critic.base.nenv = 1
# allow the usage of multiple GPUs to increase the number of examples processed simultaneously
nn.DataParallel(actor_critic).to(device)
ob_rms = False
# actor_critic, ob_rms, eval_envs, num_processes, device, num_episodes
evaluate(actor_critic, ob_rms, envs, 1, device, config, logging, test_args.visualize, recurrent_cell)
if __name__ == '__main__':
main()
|
import os
import tempfile
import numpy as np
from matplotlib import pyplot as plt
# For mockfactory installation, see https://github.com/adematti/mockfactory
from mockfactory import EulerianLinearMock, LagrangianLinearMock, utils, setup_logging
# For cosmoprimo installation see https://cosmoprimo.readthedocs.io/en/latest/user/building.html
from cosmoprimo.fiducial import DESI
from pyrecon import MultiGridReconstruction, PlaneParallelFFTReconstruction, RealMesh
from pyrecon.metrics import MeshFFTCorrelator, MeshFFTPropagator, MeshFFTTransfer, CatalogMesh
def test_metrics():
z = 1.
# Load DESI fiducial cosmology
cosmo = DESI()
power = cosmo.get_fourier().pk_interpolator().to_1d(z=z)
f = cosmo.sigma8_z(z=z,of='theta_cb')/cosmo.sigma8_z(z=z,of='delta_cb') # growth rate
bias, nbar, nmesh, boxsize, boxcenter, los = 2.0, 1e-3, 128, 1000., 500., (1,0,0)
mock = LagrangianLinearMock(power, nmesh=nmesh, boxsize=boxsize, boxcenter=boxcenter, seed=42, unitary_amplitude=False)
# This is Lagrangian bias, Eulerian bias - 1
mock.set_real_delta_field(bias=bias-1)
mesh_real = mock.mesh_delta_r + 1.
mock.set_analytic_selection_function(nbar=nbar)
mock.poisson_sample(seed=43)
mock.set_rsd(f=f, los=los)
data = mock.to_catalog()
offset = data.boxcenter - data.boxsize/2.
data['Position'] = (data['Position'] - offset) % data.boxsize + offset
#recon = MultiGridReconstruction(f=f, bias=bias, los=los, nmesh=nmesh, boxsize=boxsize, boxcenter=boxcenter, fft_engine='fftw')
recon = PlaneParallelFFTReconstruction(f=f, bias=bias, los=los, nmesh=nmesh, boxsize=boxsize, boxcenter=boxcenter, fft_engine='fftw')
recon.assign_data(data.gget('Position'))
recon.set_density_contrast()
# Run reconstruction
recon.run()
from mockfactory.make_survey import RandomBoxCatalog
randoms = RandomBoxCatalog(nbar=nbar, boxsize=boxsize, boxcenter=boxcenter, seed=44)
data['Position_rec'] = data['Position'] - recon.read_shifts(data['Position'], field='disp+rsd')
randoms['Position_rec'] = randoms['Position'] - recon.read_shifts(randoms['Position'], field='disp')
offset = data.boxcenter - data.boxsize/2.
for catalog in [data, randoms]:
catalog['Position_rec'] = (catalog['Position_rec'] - offset) % catalog.boxsize + offset
kedges = np.arange(0.005, 0.4, 0.005)
#kedges = np.arange(0.005, 0.4, 0.05)
muedges = np.linspace(-1., 1., 5)
dtype = 'f8'
def get_correlator():
mesh_recon = CatalogMesh(data['Position_rec'], shifted_positions=randoms['Position_rec'],
boxsize=boxsize, boxcenter=boxcenter, nmesh=nmesh, resampler='cic', interlacing=2, position_type='pos', dtype=dtype)
return MeshFFTCorrelator(mesh_recon, mesh_real, edges=(kedges, muedges), los=los)
def get_propagator(growth=1.):
mesh_recon = CatalogMesh(data['Position_rec'], shifted_positions=randoms['Position_rec'],
boxsize=boxsize, boxcenter=boxcenter, nmesh=nmesh, resampler='cic', interlacing=2, position_type='pos', dtype=dtype)
return MeshFFTPropagator(mesh_recon, mesh_real, edges=(kedges, muedges), los=los, growth=growth)
def get_transfer(growth=1.):
mesh_recon = CatalogMesh(data['Position_rec'], shifted_positions=randoms['Position_rec'],
boxsize=boxsize, boxcenter=boxcenter, nmesh=nmesh, resampler='cic', interlacing=2, position_type='pos', dtype=dtype)
return MeshFFTTransfer(mesh_recon, mesh_real, edges=(kedges, muedges), los=los, growth=growth)
def get_propagator_ref():
# Taken from https://github.com/cosmodesi/desi_cosmosim/blob/master/reconstruction/propagator_and_multipole/DESI_Recon/propagator_catalog_calc.py
from nbodykit.lab import ArrayMesh, FFTPower
from pmesh.pm import ParticleMesh
meshp = data.to_nbodykit().to_mesh(position='Position_rec', Nmesh=nmesh, BoxSize=boxsize, resampler='cic', compensated=True, interlaced=True, dtype='c16')
meshran = randoms.to_nbodykit().to_mesh(position='Position_rec', Nmesh=nmesh, BoxSize=boxsize, resampler='cic', compensated=True, interlaced=True, dtype='c16')
#mesh_recon = ArrayMesh(meshp.compute() - meshran.compute(), BoxSize=boxsize)
mesh_recon = meshp.compute() - meshran.compute()
Nmu = len(muedges) - 1
kmin, kmax, dk = kedges[0], kedges[-1]+1e-9, kedges[1] - kedges[0]
pm = ParticleMesh(BoxSize=mesh_real.pm.BoxSize, Nmesh=mesh_real.pm.Nmesh, dtype='c16', comm=mesh_real.pm.comm)
mesh_complex = pm.create(type='real')
mesh_complex[...] = mesh_real[...]
r_cross = FFTPower(mesh_complex, mode='2d', Nmesh=nmesh, Nmu=Nmu, dk=dk, second=mesh_recon, los=los, kmin=kmin, kmax=kmax)
#r_auto = FFTPower(mesh_recon, mode='2d', Nmesh=nmesh, Nmu=Nmu, dk=dk, los=los, kmin=kmin, kmax=kmax)
r_auto_init = FFTPower(mesh_complex, mode='2d', Nmesh=nmesh, Nmu=Nmu, dk=dk, los=los, kmin=kmin, kmax=kmax)
#print(r_auto_init.power['modes'])
return (r_cross.power['power']/r_auto_init.power['power']).real/bias, r_cross.power['power'].real, r_auto_init.power['power'].real
propagator_ref, cross_ref, auto_init_ref = get_propagator_ref()
correlator = get_correlator()
with tempfile.TemporaryDirectory() as tmp_dir:
fn = os.path.join(tmp_dir, 'tmp.npy')
correlator.save(fn)
correlator = MeshFFTCorrelator.load(fn)
propagator = correlator.to_propagator(growth=bias)
assert np.allclose(propagator.ratio, propagator_ref, atol=1e-6, rtol=1e-4, equal_nan=True)
transfer = correlator.to_transfer(growth=bias)
assert np.allclose(get_propagator(growth=bias).ratio, propagator.ratio, equal_nan=True)
assert np.allclose(get_transfer(growth=bias).ratio, transfer.ratio, equal_nan=True)
fig, lax = plt.subplots(nrows=1, ncols=3, figsize=(14,4))
fig.subplots_adjust(wspace=0.3)
lax = lax.flatten()
for imu, mu in enumerate(correlator.muavg[3:]):
k = correlator.k[:,imu]
mask = k < 0.6
k = k[mask]
lax[0].plot(k, correlator(k=k, mu=mu), label=r'$\mu = {:.2f}$'.format(mu))
lax[1].plot(k, transfer(k=k, mu=mu), label=r'$\mu = {:.2f}$'.format(mu))
lax[2].plot(k, propagator(k=k, mu=mu), label=r'$\mu = {:.2f}$'.format(mu))
for ax in lax:
ax.legend()
ax.grid(True)
        ax.set_xlabel(r'$k$ [$\mathrm{Mpc}/h$]')
lax[0].set_ylabel(r'$r(k) = P_{\mathrm{rec},\mathrm{init}}/\sqrt{P_{\mathrm{rec}}P_{\mathrm{init}}}$')
lax[1].set_ylabel(r'$t(k) = \sqrt{P_{\mathrm{rec}}/P_{\mathrm{init}}}$')
lax[2].set_ylabel(r'$g(k) = P_{\mathrm{rec},\mathrm{init}}/P_{\mathrm{init}}$')
plt.show()
ax = plt.gca()
auto = correlator.auto_initial
auto.rebin((1, len(auto.edges[-1])-1))
ax.plot(auto.k[:,0], auto.k[:,0]*auto.power[:,0].real*bias**2, label='initial')
auto = correlator.auto_reconstructed
    auto.rebin((1, len(auto.edges[-1])-1))
ax.plot(auto.k[:,0], auto.k[:,0]*auto.power[:,0].real, label='reconstructed')
ax.legend()
plt.show()
if __name__ == '__main__':
# Set up logging
setup_logging()
test_metrics()
|
#
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project.
#
from tests.engine.test_store import get_test_store
from data_wrangling_components.engine.verbs.aggregate import aggregate
from data_wrangling_components.types import FieldAggregateOperation, Step, Verb
def test_aggregate_with_count():
step = Step(
Verb.Aggregate,
"table3",
"output",
args={
"to": "newColumn",
"groupby": "ID",
"column": "item",
"operation": FieldAggregateOperation.Count,
},
)
store = get_test_store()
result = aggregate(step, store)
assert len(result.columns) == 2
assert len(result) == 3
assert result.loc[0, "ID"] == 1
assert result.loc[0, "newColumn"] == 2
assert result.loc[1, "ID"] == 2
assert result.loc[1, "newColumn"] == 1
assert result.loc[2, "ID"] == 4
assert result.loc[2, "newColumn"] == 3
def test_aggregate_with_sum():
step = Step(
Verb.Aggregate,
"table4",
"output",
args={
"to": "newColumn",
"groupby": "ID",
"column": "quantity",
"operation": FieldAggregateOperation.Sum,
},
)
store = get_test_store()
result = aggregate(step, store)
assert len(result.columns) == 2
assert len(result) == 3
assert result.loc[0, "ID"] == 1
assert result.loc[0, "newColumn"] == 123
assert result.loc[1, "ID"] == 2
assert result.loc[1, "newColumn"] == 100
assert result.loc[2, "ID"] == 4
assert result.loc[2, "newColumn"] == 184
def test_aggregate_with_min():
step = Step(
Verb.Aggregate,
"table4",
"output",
args={
"to": "newColumn",
"groupby": "ID",
"column": "quantity",
"operation": FieldAggregateOperation.Min,
},
)
store = get_test_store()
result = aggregate(step, store)
assert len(result.columns) == 2
assert len(result) == 3
assert result.loc[0, "ID"] == 1
assert result.loc[0, "newColumn"] == 45
assert result.loc[1, "ID"] == 2
assert result.loc[1, "newColumn"] == 100
assert result.loc[2, "ID"] == 4
assert result.loc[2, "newColumn"] == 45
def test_aggregate_with_median():
step = Step(
Verb.Aggregate,
"table4",
"output",
args={
"to": "newColumn",
"groupby": "ID",
"column": "quantity",
"operation": FieldAggregateOperation.Median,
},
)
store = get_test_store()
result = aggregate(step, store)
assert len(result.columns) == 2
assert len(result) == 3
assert result.loc[0, "ID"] == 1
assert result.loc[0, "newColumn"] == 61.5
assert result.loc[1, "ID"] == 2
assert result.loc[1, "newColumn"] == 100
assert result.loc[2, "ID"] == 4
assert result.loc[2, "newColumn"] == 50
|
from pylab import plot, show, legend
from random import normalvariate
x = [normalvariate(0, 1) for i in range(100)]
plot(x, 'b-', label="white noise")
legend()
show()
|
from django.shortcuts import render
from django import forms
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.urls import reverse
from .form import RenewPatientsForm,Sortform,Searchform
from django.views import generic
from django.http.response import JsonResponse
from .models import Patients, Clinic_information
from django.contrib.auth.decorators import permission_required
@permission_required('catalog.can_mark_returned')
# Create your views here.
def index(request):
num_patients=Patients.objects.all().count()
return render(
request,
'index.html',
context = {'num_patients':num_patients},
)
class PatientView(generic.ListView):
model = Patients
class PatientDetailView(generic.DetailView):
model = Patients
def display_all(request):
model = Patients
patients_list = Patients.objects.all()
newlist=[]
for patient_temp in patients_list:
data={
'id':patient_temp.id,
'name': patient_temp.name,
'specimen_type':patient_temp.specimen_type,
'other_type':patient_temp.other_type,
'collectiondate':patient_temp.collectiondate,
'collectiontime':patient_temp.collectiontime,
'hkid':patient_temp.hkid,
'ethnicity':patient_temp.ethnicity,
'sex': patient_temp.sex,
'dob':patient_temp.dob,
'referral':patient_temp.referral,
'clinicid':patient_temp.clinicid,
'doctor':patient_temp.doctor,
'phone':patient_temp.phone,
'fax':patient_temp.fax,
}
newlist.append(data)
return render(request,'catalog/displaytable.html',{'patients_list':newlist})
def sortingori(request):
form=Sortform(request.POST or None)
patients_list = Patients.objects.all()
if request.method=='POST':
if form.is_valid():
sortkey=form.cleaned_data['sortkey']
print(patients_list)
return HttpResponseRedirect(reverse('sorting',kwargs={'sortkey':sortkey}))
else:
return HttpResponseRedirect(reverse('sortingori'))
return render(request,'catalog/sortingori.html',{'patients_list':patients_list,'form':form})
def searchori(request):
formsearch=Searchform(request.POST or None)
patients_list = Patients.objects.all()
if request.method=='POST':
if formsearch.is_valid():
searchkey=formsearch.cleaned_data['searchkey']
searchvalue=formsearch.cleaned_data['searchvalue']
return HttpResponseRedirect(reverse('search',kwargs={'searchkey':searchkey,'searchvalue':searchvalue}))
else:
return HttpResponseRedirect(reverse('display'))
return render(request,'catalog/searchori.html',{'patients_list':patients_list,'formsearch':formsearch})
def sorting(request,sortkey):
model = Patients
patients_list = Patients.objects.order_by(sortkey)
form=Sortform(request.POST or None)
if request.method=='POST':
if form.is_valid():
sortkey=form.cleaned_data['sortkey']
print(patients_list)
return HttpResponseRedirect(reverse('sorting',kwargs={'sortkey':sortkey}))
else:
return HttpResponseRedirect(reverse('sorting',kwargs={'sortkey':sortkey}))
return render(request,'catalog/sorting.html',{'patients_list':patients_list,'form':form})
def renew_patients(request,pk):
patient_temp = get_object_or_404(Patients,pk=pk)
if request.method == 'POST':
form = RenewPatientsForm(
request.POST
)
if form.is_valid():
patient_temp.specimen_type = form.cleaned_data['renew_specimen_type']
patient_temp.other_type = form.cleaned_data['renew_other_type']
patient_temp.collectiondate = form.cleaned_data['renew_collectiondate']
patient_temp.collectiontime = form.cleaned_data['renew_collectiontime']
patient_temp.name = request.POST.get('renew_name')
patient_temp.hkid = form.cleaned_data['renew_hkid']
patient_temp.ethnicity = form.cleaned_data['renew_ethnicity']
patient_temp.sex = form.cleaned_data['renew_sex']
patient_temp.dob = form.cleaned_data['renew_dob']
patient_temp.clinicid = form.cleaned_data['renew_clinicid']
patient_temp.doctor = form.cleaned_data['renew_doctor']
patient_temp.phone = form.cleaned_data['renew_phone']
patient_temp.fax = form.cleaned_data['renew_fax']
patient_temp.referral = form.cleaned_data['renew_referral']
patient_temp.clinic.diagnosis = form.cleaned_data['renew_diagnosis']
patient_temp.clinic.previous = form.cleaned_data['renew_previous']
patient_temp.clinic.spid = form.cleaned_data['renew_spid']
patient_temp.clinic.celltype = form.cleaned_data['renew_celltype']
patient_temp.clinic.other = form.cleaned_data['renew_other']
form.fields['renew_stage'].initial = patient_temp.clinic.stage
patient_temp.clinic.stage = form.cleaned_data['renew_stage']
patient_temp.clinic.status = form.cleaned_data['renew_status']
patient_temp.clinic.additional = form.cleaned_data['renew_additional']
patient_temp.clinic.request = form.cleaned_data['renew_request']
patient_temp.clinic.save()
patient_temp.save()
return HttpResponseRedirect(reverse('patients-detail',args=[str(patient_temp.id)]))
else:
form = RenewPatientsForm(initial={
'renew_specimen_type': patient_temp.specimen_type,
'renew_other_type': patient_temp.other_type,
'renew_collectiondate':patient_temp.collectiondate,
'renew_collectiontime':patient_temp.collectiontime,
'renew_name': patient_temp.name,
'renew_hkid':patient_temp.hkid,
'renew_ethnicity':patient_temp.ethnicity,
'renew_sex':patient_temp.sex,
'renew_referral':patient_temp.referral,
'renew_dob':patient_temp.dob,
'renew_clinicid':patient_temp.clinicid,
'renew_doctor':patient_temp.doctor,
'renew_phone':patient_temp.phone,
'renew_fax':patient_temp.fax,
'renew_diagnosis':patient_temp.clinic.diagnosis,
'renew_previous':patient_temp.clinic.previous,
'renew_spid':patient_temp.clinic.spid,
'renew_celltype':patient_temp.clinic.celltype,
'renew_other':patient_temp.clinic.other,
'renew_stage':patient_temp.clinic.stage,
'renew_status':patient_temp.clinic.status,
'renew_additional':patient_temp.clinic.additional,
'renew_request':patient_temp.clinic.request,
})
return render(request, 'catalog/renew_patients.html',{'form':form,'patienttemp':patient_temp})
def add_patients(request):
patient_temp = Patients()
if request.method == 'POST':
form = RenewPatientsForm(request.POST)
if form.is_valid():
clinic_temp=Clinic_information(
clinic_name = form.cleaned_data['renew_name'],
previous = form.cleaned_data['renew_previous'],
spid = form.cleaned_data['renew_spid'],
diagnosis = form.cleaned_data['renew_diagnosis'],
celltype = form.cleaned_data['renew_celltype'],
stage = form.cleaned_data['renew_stage'],
status = form.cleaned_data['renew_status'],
additional = form.cleaned_data['renew_additional'],
request = form.cleaned_data['renew_request'],
other = form.cleaned_data['renew_other']
)
clinic_temp.save()
patient_temp=Patients(
specimen_type = form.cleaned_data['renew_specimen_type'],
other_type = form.cleaned_data['renew_other_type'],
collectiondate = form.cleaned_data['renew_collectiondate'],
collectiontime = form.cleaned_data['renew_collectiontime'],
name = form.cleaned_data['renew_name'],
hkid = form.cleaned_data['renew_hkid'],
ethnicity = form.cleaned_data['renew_ethnicity'],
sex = form.cleaned_data['renew_sex'],
dob = form.cleaned_data['renew_dob'],
referral = form.cleaned_data['renew_referral'],
clinicid = form.cleaned_data['renew_clinicid'],
doctor = form.cleaned_data['renew_doctor'],
phone = form.cleaned_data['renew_phone'],
fax = form.cleaned_data['renew_fax'],
clinic = clinic_temp,
)
patient_temp.save()
return HttpResponseRedirect(reverse('patients-detail',args=[str(patient_temp.id)]))
else:
form = RenewPatientsForm(initial={
'renew_name': '',
'renew_hkid':'',
'renew_ethnicity':'',
'renew_sex':'',
'renew_dob':'',
'renew_clinicid':'',
'renew_doctor':'',
'renew_phone':'',
'renew_fax':'',
})
return render(request, 'catalog/add_patients.html',{'form':form,'patienttemp':patient_temp})
def delete_patients(request,pk):
patient_temp = get_object_or_404(Patients,pk=pk)
patient_temp.delete()
return render(request, 'catalog/delete_patients.html')
import json
def jsondata(request,pk):
patient_temp = get_object_or_404(Patients,pk=pk)
data={
'id':patient_temp.id,
'name': patient_temp.name,
'specimen_type':patient_temp.specimen_type,
'other_type':patient_temp.other_type,
'collectiondate':patient_temp.collectiondate,
'collectiontime':patient_temp.collectiontime,
'hkid':patient_temp.hkid,
'ethnicity':patient_temp.ethnicity,
'sex': patient_temp.sex,
'dob':patient_temp.dob,
'referral':patient_temp.referral,
'clinicid':patient_temp.clinicid,
'doctor':patient_temp.doctor,
'phone':patient_temp.phone,
'fax':patient_temp.fax,
}
return render(request,'catalog/datatable.html',{'data':json.dumps(data)})
def search(request,searchkey,searchvalue):
model = Patients
patients_list = Patients.objects.filter(**{searchkey:searchvalue})
formsearch=Searchform(request.POST or None)
if request.method=='POST':
formsearch=Searchform(request.POST)
if formsearch.is_valid():
searchkey=formsearch.cleaned_data['searchkey']
searchvalue=formsearch.cleaned_data['searchvalue']
return HttpResponseRedirect(reverse('search',kwargs={'searchkey':searchkey,'searchvalue':searchvalue}))
else:
            formsearch=Searchform(initial={
                'searchkey':searchkey,
                'searchvalue':searchvalue,
            })
return HttpResponseRedirect(reverse('display'))
return render(request,'catalog/search.html',{'formsearch':formsearch,'patients_list':patients_list})
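# The reverse() calls above assume URL patterns named 'sortingori', 'sorting',
# 'search', 'display' and 'patients-detail'. A minimal urls.py sketch that
# would satisfy those names (paths and converters are assumptions, not taken
# from the actual project) could look like:
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('display/', views.display_all, name='display'),
#       path('sortingori/', views.sortingori, name='sortingori'),
#       path('sorting/<str:sortkey>/', views.sorting, name='sorting'),
#       path('search/<str:searchkey>/<str:searchvalue>/', views.search, name='search'),
#       path('patient/<int:pk>/', views.PatientDetailView.as_view(), name='patients-detail'),
#   ]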
|
from ._helper import getAttribute
from ._xfBase import XfBase
class XFindLastIndex(XfBase):
def __init__(self, f, xf):
self.xf = xf
self.f = f
self.idx = -1
self.lastIdx = -1
def result(self, result):
return getAttribute(self.xf, '@@transducer/result')(getAttribute(self.xf, '@@transducer/step')(result, self.lastIdx))
def step(self, result, _input):
self.idx += 1
if self.f(_input):
self.lastIdx = self.idx
return result
def _xfindLastIndex(f): return lambda xf: XFindLastIndex(f, xf)
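# Usage sketch: XFindLastIndex follows the Ramda-style '@@transducer/step' /
# '@@transducer/result' protocol, so it is normally driven by a transduce/into
# helper rather than called directly. Illustrative only (the transduce helper
# and appending transformer are assumptions, not part of this module):
#
#   xf = _xfindLastIndex(lambda x: x % 2 == 0)
#   # transduce(xf, some_appending_xf, initial, [1, 4, 7, 8, 5]) would yield 3,
#   # the index of the last element satisfying the predicate (-1 if none match).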
|
from model.address import Address
import re
class AddressHelper:
def __init__(self,app):
self.app = app
def edit(self, address, index):
wd = self.app.wd
self.open_home_page()
self.select_address_by_index(index)
wd.find_elements_by_css_selector("img[alt='Edit']")[index].click()
self.fill_address_form(address)
self.address_cache = None
def create(self, address):
wd = self.app.wd
self.open_home_page()
# initial address creation
wd.find_element_by_link_text("add new").click()
self.fill_address_form(address)
self.address_cache = None
def fill_address_form(self, address):
wd = self.app.wd
if address.first_name is not None:
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(address.first_name)
if address.midle_name is not None:
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(address.midle_name)
if address.last_name is not None:
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(address.last_name)
if address.nick_name is not None:
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(address.nick_name)
if address.company is not None:
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys(address.company)
if address.addrs is not None:
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(address.addrs)
if address.home is not None:
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(address.home)
if address.mobile is not None:
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(address.mobile)
if address.work is not None:
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys(address.work)
if address.fax is not None:
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys(address.fax)
if address.email is not None:
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(address.email)
        if address.email2 is not None:
            wd.find_element_by_name("email2").click()
            wd.find_element_by_name("email2").clear()
            wd.find_element_by_name("email2").send_keys(address.email2)
        if address.email3 is not None:
            wd.find_element_by_name("email3").click()
            wd.find_element_by_name("email3").clear()
            wd.find_element_by_name("email3").send_keys(address.email3)
        if address.secondaryphone is not None:
            wd.find_element_by_name("phone2").click()
            wd.find_element_by_name("phone2").clear()
            wd.find_element_by_name("phone2").send_keys(address.secondaryphone)
def open_home_page(self):
wd = self.app.wd
        if not (wd.current_url.endswith("/index.php") and len(wd.find_elements_by_xpath("//input[@value='Send e-Mail']")) > 0):
wd.find_element_by_link_text("home").click()
def submit(self):
wd = self.app.wd
# submit address creation
wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
def delete_first_address(self):
wd = self.app.wd
wd.find_element_by_link_text("home").click()
# select first address
wd.find_element_by_name("selected[]").click()
# submit deletion
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to_alert().accept()
wd.find_element_by_link_text("home").click()
self.address_cache = None
def delete_address_by_index(self, index):
wd = self.app.wd
wd.find_element_by_link_text("home").click()
self.select_address_by_index(index)
# submit deletion
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to_alert().accept()
wd.find_element_by_link_text("home").click()
self.address_cache = None
def select_address_by_index(self, index):
wd = self.app.wd
self.open_home_page()
wd.find_elements_by_name("selected[]")[index].click()
def count(self):
wd = self.app.wd
self.open_home_page()
return len(wd.find_elements_by_name("selected[]"))
address_cache = None
def get_address_list(self):
if self.address_cache is None:
wd = self.app.wd
self.open_home_page()
self.address_cache = []
for element in wd.find_elements_by_name("entry"):
cells = element.find_elements_by_tag_name("td")
id = element.find_element_by_name("selected[]").get_attribute("value")
last_name = cells[1].text
first_name = cells[2].text
all_phones = cells[5].text
all_emails = cells[4].text
self.address_cache.append(Address(last_name=last_name, first_name=first_name, id=id, all_phones_from_home_page=all_phones, all_emails_from_home_page=all_emails))
return list(self.address_cache)
def get_address_info_from_edit_page(self, index):
wd = self.app.wd
self.select_address_by_index(index)
wd.find_elements_by_css_selector("img[alt='Edit']")[index].click()
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
home = wd.find_element_by_name("home").get_attribute("value")
work = wd.find_element_by_name("work").get_attribute("value")
mobile = wd.find_element_by_name("mobile").get_attribute("value")
secondaryphone = wd.find_element_by_name("phone2").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
return Address(first_name=firstname, last_name=lastname, id=id, home=home, work=work, mobile=mobile, secondaryphone=secondaryphone, email=email, email2=email2, email3=email3)
def get_address_from_view_page(self, index):
wd = self.app.wd
self.open_home_page()
wd.find_elements_by_css_selector('img[alt="Details"]')[index].click()
text = wd.find_element_by_id("content").text
home = re.search("H: (.*)", text).group(1)
work = re.search("W: (.*)", text).group(1)
mobile = re.search("M: (.*)", text).group(1)
secondaryphone = re.search("P: (.*)", text).group(1)
return Address(home=home, work=work, mobile=mobile,
secondaryphone=secondaryphone)
def delete_address_by_id(self, id):
wd = self.app.wd
wd.find_element_by_link_text("home").click()
self.select_address_by_id(id)
# submit deletion
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to_alert().accept()
wd.find_element_by_link_text("home").click()
self.address_cache = None
def select_address_by_id(self, id):
wd = self.app.wd
self.open_home_page()
#wd.find_element_by_css_selector("input[value={0}]".format(id)).click()
wd.find_element_by_css_selector("input[value='%s']" % id).click()
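# Typical call pattern from the tests that drive this helper (illustrative
# only; the `app` fixture and the exact Address fields used are assumptions
# based on the methods above, not a definitive API):
#
#   helper = AddressHelper(app)
#   helper.create(Address(first_name="John", last_name="Doe", email="j@d.example"))
#   helper.submit()
#   assert helper.count() == old_count + 1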
|
from views import index, wait_game, add_new_user, login_user, logout_user, get_active_users
from web_socket import websocket_handler
from settings import BASE_DIR
def setup_routes(app):
app.router.add_get('/', index)
app.router.add_get('/wait_game', wait_game)
app.router.add_get('/ws', websocket_handler)
app.router.add_post('/users', add_new_user)
app.router.add_post('/login', login_user)
app.router.add_post('/logout', logout_user)
app.router.add_get('/users', get_active_users)
def setup_static_routes(app):
app.router.add_static('/static/',
path=BASE_DIR / 'static',
name='static')
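# Sketch of how these helpers are wired into an aiohttp application (the entry
# point shown here is an assumption; web.Application/web.run_app are standard
# aiohttp API):
#
#   from aiohttp import web
#   app = web.Application()
#   setup_routes(app)
#   setup_static_routes(app)
#   web.run_app(app)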
|
import sys
from html.parser import HTMLParser
from .models import Customer, Account, Statement
from typing import List
class HtmlParserCustomers(HTMLParser):
"""A class used to parse customers html to Customer objects."""
def __init__(self, username: str = '') -> None:
HTMLParser.__init__(self)
# customers data
self.customer_doc = username
self.customers = list()
self.customer_object_entered = False
self.customer_attribute_entered = False
self.customer_li_count = 0
self.customer_name, self.customer_address, self.customer_emails, self.customer_phones = ('','','','')
def handle_starttag(self, tag: str, attrs: List[tuple]) -> None:
"""Set instance properties based on opening html tags."""
# beginning of customer html
if tag == 'ul':
for name, value in attrs:
if name == 'class' and value == 'collection with-header':
self.customer_object_entered = True
# beginning of customer attribute html
if self.customer_object_entered == True and tag == 'li':
self.customer_attribute_entered = True
self.customer_li_count += 1
def handle_endtag(self, tag: str) -> None:
"""Set instance properties based on closing html tags."""
# end of customer html
if tag == 'ul' and self.customer_object_entered == True:
self.customers.append(Customer(self.customer_name, self.customer_doc, self.customer_address, self.customer_emails, self.customer_phones))
self.customer_object_entered = False
self.customer_attribute_entered = False
self.customer_li_count = 0
self.customer_name, self.customer_address, self.customer_emails, self.customer_phones = ('','','','')
def handle_data(self, data: str) -> None:
"""Set instance properties based on html data."""
# customer attribute text that's not empty
if self.customer_attribute_entered == True and data.strip():
if self.customer_li_count == 1:
self.customer_name = data
elif self.customer_li_count == 2:
self.customer_phones = data
elif self.customer_li_count == 3:
self.customer_emails = data
elif self.customer_li_count == 4:
self.customer_address = data
class HtmlParserAccounts(HTMLParser):
"""A class used to parse accounts html to Account objects."""
def __init__(self) -> None:
HTMLParser.__init__(self)
# accounts data
self.accounts = list()
self.account_object_entered = False
self.account_attributes_entered = False
self.account_attribute_count = 0
self.account_name, self.account_number, self.account_balance, self.account_id = ('','','','')
def handle_starttag(self, tag: str, attrs: List[tuple]) -> None:
"""Set instance properties based on opening html tags."""
# beginning of account html
if tag == 'ul':
for name, value in attrs:
if name == 'class' and value == 'collection':
self.account_object_entered = True
# beginning of account attribute html
if self.account_object_entered == True and tag == 'li':
self.account_attributes_entered = True
# account id
if self.account_attributes_entered == True and tag == 'a':
for name, value in attrs:
if name == 'href':
self.account_id = value[value.index('/')+1:]
def handle_endtag(self, tag: str) -> None:
"""Set instance properties based on closing html tags."""
# end of account html
if tag == 'li' and self.account_attributes_entered == True:
self.accounts.append(Account(self.account_name, self.account_number, self.account_balance, self.account_id))
self.account_attributes_entered = False
self.account_attribute_count = 0
            self.account_name, self.account_number, self.account_balance, self.account_id = ('','','','')
if tag == 'ul' and self.account_object_entered == True:
self.account_object_entered = False
def handle_data(self, data: str) -> None:
"""Set instance properties based on html data."""
# account attribute text that's not empty
if self.account_attributes_entered == True and data.strip():
self.account_attribute_count += 1
if self.account_attribute_count == 1:
self.account_name = data
elif self.account_attribute_count == 2:
self.account_number = data
elif self.account_attribute_count == 3:
self.account_balance = data
class HtmlParserStatements(HTMLParser):
"""A class used to parse statements html to Statement objects."""
def __init__(self) -> None:
HTMLParser.__init__(self)
# statements data
self.statements = list()
self.statement_headerPositions = {}
self.statement_thead_entered = False
self.statement_thead_td_entered = False
self.statement_thead_count = 0
self.statements_html_entered = False
self.statement_object_entered = False
self.statement_attributes_entered = False
self.statement_attribute_count = 0
self.statement_date, self.statement_amount, self.statement_balance, self.statement_concept = ('','','','')
def handle_starttag(self, tag: str, attrs: List[tuple]) -> None:
"""Set instance properties based on opening html tags."""
# beginning of statement html
if tag == 'thead':
self.statement_thead_entered = True
if tag == 'td' and self.statement_thead_entered == True:
self.statement_thead_td_entered = True
self.statement_thead_count += 1
if tag == 'tbody':
self.statements_html_entered = True
# beginning of statement object
if tag == 'tr' and self.statements_html_entered == True:
self.statement_object_entered = True
# beginning of statement attribute html
if self.statement_object_entered == True and tag == 'td':
self.statement_attributes_entered = True
self.statement_attribute_count += 1
def handle_endtag(self, tag: str) -> None:
"""Set instance properties based on closing html tags."""
# end of statement header html
if tag == 'thead' and self.statement_thead_entered == True:
self.statement_thead_entered = False
self.statement_thead_td_entered = False
self.statement_thead_count = 0
# end of statement object html
if tag == 'tr' and self.statement_attributes_entered == True:
self.statements.append(Statement(self.statement_date, self.statement_amount, self.statement_balance, self.statement_concept))
self.statement_object_entered = False
self.statement_attributes_entered = False
self.statement_attribute_count = 0
self.statement_date, self.statement_amount, self.statement_balance, self.statement_concept = ('','','','')
# end of statements html
if tag == 'tbody':
self.statements_html_entered = False
def handle_data(self, data: str) -> None:
"""Set instance properties based on html data."""
# statement header text that's not empty
if self.statement_thead_td_entered == True and data.strip():
self.statement_headerPositions[self.statement_thead_count] = data.lower()
# statement attribute text that's not empty
if self.statement_attributes_entered == True and data.strip():
# if the attribute is in the header,
            # use the header for reference
if self.statement_attribute_count in self.statement_headerPositions:
if self.statement_headerPositions[self.statement_attribute_count] == 'statement':
self.statement_concept = data
elif self.statement_headerPositions[self.statement_attribute_count] == 'date':
self.statement_date = data
elif self.statement_headerPositions[self.statement_attribute_count] == 'amount':
self.statement_amount = data
elif self.statement_headerPositions[self.statement_attribute_count] == 'balance':
self.statement_balance = data
# otherwise fall back to a set position
else:
if self.statement_attribute_count == 1:
self.statement_concept = data
elif self.statement_attribute_count == 2:
self.statement_date = data
elif self.statement_attribute_count == 3:
self.statement_amount = data
elif self.statement_attribute_count == 4:
self.statement_balance = data
class HtmlObjects:
"""A class used to parse html to objects."""
def __init__(self) -> None:
pass
def parse_customers_html_to_objects(self, customers_html: str, username: str) -> List[Customer]:
"""Iterate over the customers' html, and create and return Customer objects."""
html_parser_customers = HtmlParserCustomers(username)
html_parser_customers.feed(customers_html)
html_parser_customers.close()
return html_parser_customers.customers
def parse_accounts_html_to_objects(self, accounts_html: str) -> List[Account]:
"""Iterate over the accounts' html, and create and return Account objects."""
html_parser_accounts = HtmlParserAccounts()
html_parser_accounts.feed(accounts_html)
html_parser_accounts.close()
return html_parser_accounts.accounts
def parse_statements_html_to_objects(self, statements_html: str) -> List[Statement]:
"""Iterate over the statements' html, and create and return Statement objects."""
html_parser_statements = HtmlParserStatements()
html_parser_statements.feed(statements_html)
html_parser_statements.close()
return html_parser_statements.statements
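# Usage sketch (the HTML strings would normally come from the banking site's
# responses; variable names below are illustrative):
#
#   html_objects = HtmlObjects()
#   customers = html_objects.parse_customers_html_to_objects(customers_html, 'someuser')
#   accounts = html_objects.parse_accounts_html_to_objects(accounts_html)
#   statements = html_objects.parse_statements_html_to_objects(statements_html)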
|
import os
import sys
from microbench.benchmarks import BENCHMARKS_TO_RUN
from util.constants import LOG
from microbench.constants import (LAX_TOLERANCE, MIN_TIME, BENCHMARK_THREADS,
BENCHMARK_PATH, BENCHMARK_LOGFILE_PATH, LOCAL_REPO_DIR, JENKINS_REF_PROJECT)
class Config(object):
""" Configuration for run_micro_bench. All information is read-only. """
def __init__(self, benchmark_path=BENCHMARK_PATH, benchmarks=BENCHMARKS_TO_RUN, lax_tolerance=LAX_TOLERANCE, min_time=MIN_TIME,
num_threads=BENCHMARK_THREADS, logfile_path=BENCHMARK_LOGFILE_PATH, is_local=False, jenkins_folders=[], branch=None, publish_results_env='none',
publish_results_username=None, publish_results_password=None):
validate_benchmark_path(benchmark_path)
# path to benchmark binaries
self.benchmark_path = benchmark_path
validate_benchmarks(benchmarks)
# benchmark executables to run
self.benchmarks = sorted(benchmarks)
# if fewer than min_ref_values are available
self.lax_tolerance = lax_tolerance
# minimum run time for the benchmark, seconds
self.min_time = min_time
# the number of threads to use for running microbenchmarks
self.num_threads = num_threads
self.logfile_path = logfile_path
# if local run is specified make sure the local repo is set up
self.is_local = is_local
if self.is_local:
if not os.path.exists(LOCAL_REPO_DIR):
os.mkdir(LOCAL_REPO_DIR)
self.ref_data_source = {
"folders": jenkins_folders,
"project": JENKINS_REF_PROJECT,
"branch": branch
}
# Environment to which the microbenchmark results will be published (primarily 'prod')
self.publish_results_env = publish_results_env
# Credentials needed to send results to the performance storage service
self.publish_results_username = publish_results_username
self.publish_results_password = publish_results_password
return
def validate_benchmark_path(benchmark_path):
if not os.path.exists(benchmark_path):
LOG.error("The benchmark executable path directory {} does not exist".format(
benchmark_path))
sys.exit(1)
def validate_benchmarks(benchmarks):
for benchmark in benchmarks:
if not benchmark.endswith("_benchmark"):
LOG.error("Invalid target benchmark {}".format(benchmark))
sys.exit(1)
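# Example instantiation (argument values are illustrative; defaults come from
# microbench.constants, and the benchmark runner that consumes this Config is
# defined elsewhere):
#
#   config = Config(is_local=True, branch='master',
#                   jenkins_folders=['folder-a'], publish_results_env='none')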
|
import csv
from cambio import *
from classes.pokemon import *
from classes.pokemon_planta import *
from classes.pokemon_electrico import *
from classes.pokemon_agua import *
from classes.pokemon_fuego import *
from combate import *
if __name__ == "__main__":
entrenador1_pokemon1 = Pokemon_planta(1, "bulbasaur", 50, 8, 10, 9)
entrenador1_pokemon2= Pokemon_agua(4, "squirtle", 45, 9, 11, 10)
entrenador2_pokemon1= Pokemon_fuego(7, "charmander", 47, 11, 7, 11)
entrenador2_pokemon2= Pokemon_electrico(25, "pikachu", 43, 12, 8, 13)
pokemonentrenador1 = [entrenador1_pokemon1, entrenador1_pokemon2]
pokemonentrenador2 = [entrenador2_pokemon1, entrenador2_pokemon2]
combateconcambio(pokemonentrenador1, pokemonentrenador2)
|
# Generated by Django 4.0.2 on 2022-02-04 03:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='barangay',
old_name='name',
new_name='barangay',
),
migrations.RenameField(
model_name='municipality',
old_name='name',
new_name='municipality',
),
migrations.RenameField(
model_name='province',
old_name='name',
new_name='province',
),
migrations.RenameField(
model_name='region',
old_name='name',
new_name='region',
),
]
|
from __future__ import print_function
import mx.Tools
import mx.Tools.NewBuiltins
import time, sys
# forall
print ('forall')
t = (3,) * 10; assert forall(lambda x: x==3,t) == 1
t = t + (4,); assert forall(lambda x: x==3,t) == 0
# exists
print ('exists')
t = (3,) * 10; assert exists(lambda x: x==4,t) == 0
t = t + (4,); assert exists(lambda x: x==4,t) == 1
# count
print ('count')
t = (3,) * 10; assert count(lambda x: x==3,t) == 10
t = t + (4,); assert count(lambda x: x==4,t) == 1
# index
print ('index')
t = (3,) * 10
try:
index(lambda x: x!=3,t)
except ValueError:
ok = 1
else:
ok = 0
assert ok == 1
t = t + (4,); assert index(lambda x: x==4,t) == 10
def testkw(x,a=4):
return x,a
def testtwoargs(a, b):
return a + b
# napply
print ('napply')
t = napply(10,time.time)
t = napply(10,len,(t,))
t = napply(10,testtwoargs,(0,10))
t = napply(10,testkw,(2,),{'a':3})
assert t == ((2, 3), (2, 3), (2, 3), (2, 3), (2, 3), (2, 3),
(2, 3), (2, 3), (2, 3), (2, 3))
# trange
print ('trange')
t = trange(10); assert t == tuple(range(10))
t = trange(1,10); assert t == tuple(range(1,10))
t = trange(1,10,2); assert t == tuple(range(1,10,2))
t = trange(1,10,3); assert t == tuple(range(1,10,3))
t = trange(-10); assert t == tuple(range(-10))
t = trange(-1,-10); assert t == tuple(range(-1,-10))
t = trange(-10,-1); assert t == tuple(range(-10,-1))
t = trange(-10,-1,2); assert t == tuple(range(-10,-1,2))
t = trange(-10,-1,3); assert t == tuple(range(-10,-1,3))
t = trange(-1,-10,-1); assert t == tuple(range(-1,-10,-1))
t = trange(-1,-10,-2); assert t == tuple(range(-1,-10,-2))
t = trange(-1,-10,-3); assert t == tuple(range(-1,-10,-3))
# indices
print ('indices')
l = range(10); assert indices(l) == trange(10)
t = trange(10); assert indices(t) == trange(10)
s = '0123456789'; assert indices(s) == trange(10)
# range_len
print ('range_len')
l = range(10); assert range_len(l) == range(10)
t = trange(10); assert range_len(t) == range(10)
s = '0123456789'; assert range_len(s) == range(10)
# irange
print ('irange')
l = range(1,10,2); assert irange(l) == ((0, 1), (1, 3), (2, 5), (3, 7), (4, 9))
t = range(1,10,2); assert irange(t) == ((0, 1), (1, 3), (2, 5), (3, 7), (4, 9))
d = {0:2,1:5,2:7}; assert irange(d) == ((0, 2), (1, 5), (2, 7))
d = {'a':1,'m':2,'r':3,'c':4}; assert irange(d,'marc') == (('m', 2), ('a', 1), ('r', 3), ('c', 4))
l = range(10); assert irange(l,(1,3,5,6,7)) == ((1, 1), (3, 3), (5, 5), (6, 6), (7, 7))
t = range(10); assert irange(t,(4,1,5,2,3)) == ((4, 4), (1, 1), (5, 5), (2, 2), (3, 3))
# ifilter
print ('ifilter')
c = lambda x: x>5
l = range(10); assert ifilter(c,l) == [(6, 6), (7, 7), (8, 8), (9, 9)]
t = trange(10); assert ifilter(c,t) == [(6, 6), (7, 7), (8, 8), (9, 9)]
c = lambda x: x>='f'
s = 'abcdefghijk'; assert ifilter(c,s) == [(5, 'f'), (6, 'g'), (7, 'h'), (8, 'i'), (9, 'j'), (10, 'k')]
c = lambda x: x>5
l = range(10); assert ifilter(c,l,(2,6,7)) == [(6, 6), (7, 7)]
t = trange(10); assert ifilter(c,t,(7,6,2)) == [(7, 7), (6, 6)]
c = lambda x: x>='f'
s = 'abcdefghijk'; assert ifilter(c,s,(1,3,5,7)) == [(5, 'f'), (7, 'h')]
# mapply
print ('mapply')
class C:
def test(self,x,y):
return (x,y)
o = napply(10,C,()) # create 10 objects
l = map(getattr,o,('test',)*len(o)) # get test methods
r = mapply(l,(1,2)) # call each of them with (1,2)
assert r == ((1,2),)*10
# method_mapply
print ('method_mapply')
l = [None] * 100000
for i in indices(l):
l[i] = []
print('for-loop:', end=' ')
start = time.clock()
for x in l:
    x.append('hi')
print(time.clock() - start, 'seconds')
print('map:', end=' ')
start = time.clock()
map(lambda x: x.append('hi'),l)
print(time.clock() - start, 'seconds')
print('method_mapply:', end=' ')
start = time.clock()
method_mapply(l,'append',('hi',))
print(time.clock() - start, 'seconds')
print('checking...')
for x,y,z in l:
assert x == y == z
# get
print ('get')
l = range(10)
assert get(l,2) == 2
assert get(l,20,2) == 2
# extract
print ('extract')
l = range(10)
assert extract(l,(1,2,3)) == [1,2,3]
assert extract(l,(1,20,30),(1,20,30)) == [1,20,30]
# findattr
print ('findattr')
l = []
d = {}
assert findattr((l,d),'count')
assert findattr((l,d),'items')
# tuples
print ('tuples')
a = range(1,10)
b = range(2,12)
c = range(3,14)
assert tuples(a,b,c) == [(1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6),
(5, 6, 7), (6, 7, 8), (7, 8, 9), (8, 9, 10),
(9, 10, 11)]
assert tuples(c,b,a,b,c) == \
[(3, 2, 1, 2, 3), (4, 3, 2, 3, 4), (5, 4, 3, 4, 5), (6, 5, 4, 5, 6),
(7, 6, 5, 6, 7), (8, 7, 6, 7, 8), (9, 8, 7, 8, 9), (10, 9, 8, 9, 10),
(11, 10, 9, 10, 11), (12, 11, None, 11, 12),
(13, None, None, None, 13)]
# lists
print ('lists')
a = range(1,10)
b = range(2,11)
c = range(3,12)
assert (a,b,c) == lists(tuples(a,b,c))
assert lists(b,c,a) == ([2, 3, 1], [3, 4, 2], [4, 5, 3], [5, 6, 4],
[6, 7, 5], [7, 8, 6], [8, 9, 7], [9, 10, 8],
[10, 11, 9])
assert lists(b[:3],a,c) == ([2, 1, 3], [3, 2, 4], [4, 3, 5])
# dict
print ('dict')
items = tuples(a,b)
d = dict(items)
assert d == {9: 10, 8: 9, 7: 8, 6: 7, 5: 6, 4: 5, 3: 4, 2: 3, 1: 2}
# invdict
print ('invdict')
assert invdict(d) == {10: 9, 9: 8, 8: 7, 7: 6, 6: 5, 5: 4, 4: 3, 3: 2, 2: 1}
# acquire
print ('acquire')
class C:
baseobj = None
def __init__(self,baseobj=None):
self.baseobj = baseobj
__getattr__ = acquire
class B:
a = 1
b = B()
c = C(baseobj=b)
assert c.a == 1
if 0:
# xmap
print ('xmap')
import xmap
m = xmap(lambda x: 2*x, xrange(sys.maxint))
assert list(m[0:10]) == [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
assert list(m[10000:10010]) == [20000, 20002, 20004, 20006, 20008, 20010,
20012, 20014, 20016, 20018]
try:
m[sys.maxint-1]
except OverflowError:
pass
else:
        raise AssertionError('should have received an OverflowError')
# iremove
print ('iremove')
l = range(10)
iremove(l,(1,2,3))
assert l == [0, 4, 5, 6, 7, 8, 9]
d = dict(tuples(range(10),range(1,11)))
iremove(d,(1,2,3))
assert d == {9: 10, 8: 9, 7: 8, 6: 7, 5: 6, 4: 5, 0: 1}
# verscmp
print ('verscmp')
verscmp = mx.Tools.verscmp
assert verscmp('1.0','1.1') < 0
assert verscmp('1.0','1.0') == 0
assert verscmp('1.0','1.2') < 0
assert verscmp('1.1','1.0') > 0
assert verscmp('1.1a','1.0') > 0
assert verscmp('1.1a','1.0a') > 0
assert verscmp('1.0a','1.0a') == 0
assert verscmp('1.0b','1.0a') > 0
assert verscmp('1.0a','1.0b') < 0
assert verscmp('1.0a','1.0c') < 0
assert verscmp('1.0a','1.0d') < 0
assert verscmp('1.0a','1.0a.1') < 0
assert verscmp('1.0a.1','1.0a') > 0
assert verscmp('1.0a.2','1.0a') > 0
assert verscmp('1.0a','1.0.0b') < 0
assert verscmp('1.0','1.0.0b') > 0
assert verscmp('1.0alpha','1.0.0') < 0
assert verscmp('1.alpha','1.0') < 0
assert verscmp('1.2alpha','1.2') < 0
assert verscmp('1.2alpha.1','1.2.1') < 0
assert verscmp('1alpha.2.1','1.0') < 0
assert verscmp('1alpha.','1alpha') == 0
assert verscmp('1.0.0.0.0','1.0') == 0
assert verscmp('1.0.0.0.1','1.0') > 0
# interactive
print ('interactive')
print('Python is operating in %sinteractive mode' %
      ('non-' * (not mx.Tools.interactive())))
print('Works.')
|
# stdlib imports
import os
import numpy as np
import urllib
import json
from datetime import timedelta, datetime
import collections
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# import numpy as np
import sqlite3 as lite
import pandas as pd
# local imports
from mapio.shake import getHeaderData
from libcomcat.search import get_event_by_id, search
from mapio.multihaz import MultiHazardGrid
from gfail.stats import get_rangebeta, get_pdfbeta
from mapio.gdal import GDALGrid
from mapio.gmt import GMTGrid
# Don't delete this, it's needed in an eval function
import matplotlib.cm as cm # DO NOT DELETE
# DO NOT DELETE ABOVE LINE
# Define bin edges (lower and upper are clipped here but
# are not clipped in reality)
lshbins = [0.1, 1., 10., 100., 1000.]
lspbins = [10., 100., 1000., 10000., 1e5]
lqhbins = [1., 10., 100., 1000., 10000.]
lqpbins = [100., 1000., 10000., 100000., 1e6]
def is_grid_point_source(grid):
"""Was the shakemap grid constructed with a point source?
This makes use of the 'urat' layer, which is the ratio of the predicted
ground motion standard deviation to the GMPE standard deviation. The only
reason this could ever be greater than 1.0 is if the uncertainty of the
    prediction is inflated due to the point source approximation; further,
if a point source was used, there will always be some
locations with 'urat' > 1.0.
Args:
grid (ShakeGrid): A ShakeGrid object from MapIO.
Returns:
bool: True if point rupture.
"""
data = grid.getData()
urat = data['urat'].getData()
max_urat = np.max(urat)
if max_urat > (1 + np.finfo(float).eps):
return True
else:
return False
def get_event_comcat(shakefile, timewindow=60, degwindow=0.3, magwindow=0.2):
"""
Find an event in comcat, searching first by event id and if that
fails searching by magnitude, time, and location.
Args:
shakefile (str): path to shakemap .xml file of event to find
timewindow (float): width of time window to search around time defined
in shakefile (in seconds)
degwindow (float): width of area to search around location specified in
shakefile (in degrees).
magwindow (float): width of magnitude window to search around the
magnitude specified in shakefile.
Returns:
None if event not found, else tuple (info, detail, shakemap) where,
* info: json formatted dictionary of info.json for the event
* detail: event detail from comcat
* shakemap: shakemap of event found (from comcat)
"""
header_dicts = getHeaderData(shakefile)
grid_dict = header_dicts[0]
event_dict = header_dicts[1]
#version = grid_dict['shakemap_version']
shaketime = grid_dict['process_timestamp']
try:
eid = event_dict['event_id']
net = 'us'
if 'event_network' in event_dict:
net = event_dict['event_network']
if not eid.startswith(net):
eid = net + eid
detail = get_event_by_id(eid, includesuperseded=True)
except Exception as e:
lat = event_dict['lat']
lon = event_dict['lon']
mag = event_dict['magnitude']
time = event_dict['event_timestamp']
starttime = time - timedelta(seconds=timewindow)
endtime = time + timedelta(seconds=timewindow)
minlat = lat - degwindow
minlon = lon - degwindow
maxlat = lat + degwindow
maxlon = lon + degwindow
minmag = max(0, mag - magwindow)
maxmag = min(10, mag + magwindow)
events = search(starttime=starttime,
endtime=endtime,
minmagnitude=minmag,
maxmagnitude=maxmag,
minlatitude=minlat,
minlongitude=minlon,
maxlatitude=maxlat,
maxlongitude=maxlon)
if not len(events):
return None
detail = events[0].getDetailEvent()
allversions = detail.getProducts('shakemap', version='all')
# Find the right version
dates1 = [allv.product_timestamp for allv in allversions]
dates = np.array([datetime.fromtimestamp(int(str(dat)[:10])) for dat in dates1])
idx = np.argmin(np.abs(dates-shaketime))
#vers = [allv.version for allv in allversions]
#idx = np.where(np.array(vers) == version)[0][0]
shakemap = allversions[idx]
infobytes, url = shakemap.getContentBytes('info.json')
info = json.loads(infobytes.decode('utf-8'))
return info, detail, shakemap
def parseConfigLayers(maplayers, config, keys=None):
"""
Parse things that need to coodinate with each layer (like lims, logscale,
colormaps etc.) from config file, in right order, where the order is from
maplayers.
Args:
maplayers (dict): Dictionary containing model output.
config (ConfigObj): Config object describing options for specific
model.
keys (list): List of keys of maplayers to process, e.g. ``['model']``.
Returns:
tuple: (plotorder, logscale, lims, colormaps, maskthreshes) where:
* plotorder: maplayers keys in order of plotting.
* logscale: list of logscale options from config corresponding to
keys in plotorder (same order).
* lims: list of colorbar limits from config corresponding to keys
in plotorder (same order).
* colormaps: list of colormaps from config corresponding to keys
in plotorder (same order),
* maskthreshes: list of mask thresholds from config corresponding
to keys in plotorder (same order).
"""
# TODO:
# - Add ability to interpret custom color maps.
# get all key names, create a plotorder list in case maplayers is not an
# ordered dict, making sure that anything called 'model' is first
if keys is None:
keys = list(maplayers.keys())
plotorder = []
configkeys = list(config.keys())
try:
limits = config[configkeys[0]]['display_options']['lims']
lims = []
except:
lims = None
limits = None
try:
colors = config[configkeys[0]]['display_options']['colors']
colormaps = []
except:
colormaps = None
colors = None
try:
logs = config[configkeys[0]]['display_options']['logscale']
logscale = []
except:
logscale = False
logs = None
try:
masks = config[configkeys[0]]['display_options']['maskthresholds']
maskthreshes = []
except:
maskthreshes = None
masks = None
try:
default = \
config[configkeys[0]]['display_options']['colors']['default']
default = eval(default)
except:
default = None
for i, key in enumerate(keys):
plotorder += [key]
if limits is not None:
found = False
for lim1 in limits:
if lim1 in key:
if type(limits[lim1]) is list:
                        getlim = np.array(limits[lim1]).astype(float)
else:
try:
getlim = eval(limits[lim1])
except:
getlim = None
lims.append(getlim)
found = True
if not found:
lims.append(None)
if colors is not None:
found = False
for c in colors:
if c in key:
getcol = colors[c]
colorobject = eval(getcol)
if colorobject is None:
colorobject = default
colormaps.append(colorobject)
found = True
if not found:
colormaps.append(default)
if logs is not None:
found = False
for g in logs:
getlog = False
if g in key:
if logs[g].lower() == 'true':
getlog = True
logscale.append(getlog)
found = True
if not found:
logscale.append(False)
if masks is not None:
found = False
for m in masks:
if m in key:
getmask = eval(masks[m])
maskthreshes.append(getmask)
found = True
if not found:
maskthreshes.append(None)
# Reorder everything so model is first, if it's not already
if plotorder[0] != 'model':
indx = [idx for idx, key in enumerate(plotorder) if key == 'model']
if len(indx) == 1:
indx = indx[0]
firstpo = plotorder.pop(indx)
plotorder = [firstpo] + plotorder
firstlog = logscale.pop(indx)
logscale = [firstlog] + logscale
firstlim = lims.pop(indx)
lims = [firstlim] + lims
firstcol = colormaps.pop(indx)
colormaps = [firstcol] + colormaps
return plotorder, logscale, lims, colormaps, maskthreshes
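# For reference, parseConfigLayers expects each model config to carry a
# 'display_options' section keyed by layer-name fragments. A schematic
# ConfigObj layout consistent with the lookups above (section and value
# strings are illustrative, not from a real config):
#
#   [modelname]
#     [[display_options]]
#       [[[lims]]]
#         model = 'np.linspace(0., 0.4, 5)'
#       [[[colors]]]
#         default = 'cm.jet'
#         model = 'cm.CMRmap_r'
#       [[[logscale]]]
#         model = 'True'
#       [[[maskthresholds]]]
#         model = 'None'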
def text_to_json(input1):
"""Simplification of text_to_json from shakelib.rupture.factory
Args:
input1 (str): url or filepath to text file
Returns:
json formatted stream of input1
"""
if os.path.exists(input1):
with open(input1, 'r') as f:
lines = f.readlines()
else:
with urllib.request.urlopen(input1) as f:
lines = f.readlines()
x = []
y = []
z = []
reference = ''
# convert to geojson
for line in lines:
sline = line.strip()
if sline.startswith('#'):
reference += sline.strip('#').strip('Source: ')
continue
if sline.startswith('>'):
if len(x): # start of new line segment
x.append(np.nan)
y.append(np.nan)
z.append(np.nan)
continue
else: # start of file
continue
if not len(sline.strip()):
continue
parts = sline.split()
y.append(float(parts[0]))
x.append(float(parts[1]))
if len(parts) >= 3:
z.append(float(parts[2]))
else:
print('Fault file has no depths, assuming zero depth')
z.append(0.0)
coords = []
poly = []
for lon, lat, dep in zip(x, y, z):
if np.isnan(lon):
coords.append(poly)
poly = []
else:
poly.append([lon, lat, dep])
if poly != []:
coords.append(poly)
d = {
"type": "FeatureCollection",
"metadata": {
'reference': reference
},
"features": [
{
"type": "Feature",
"properties": {
"rupture type": "rupture extent"
},
"geometry": {
"type": "MultiPolygon",
"coordinates": [coords]
}
}
]
}
return json.dumps(d)
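# Expected input format, inferred from the parser above: optional '#Source:'
# reference lines, '>' lines separating rupture segments, and one
# 'lat lon [depth]' triple per data line, e.g. (coordinates are made up):
#
#   #Source: example reference
#   34.10 -118.20 0.0
#   34.20 -118.30 5.0
#   >
#   34.30 -118.40 0.0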
def write_floats(filename, grid2d):
"""Create a binary (with acc. header file) version of a Grid2D object.
Args:
filename (str): String filename to write (i.e., 'probability.flt')
grid2d (Grid2D): MapIO Grid2D object.
Returns:
Given a filename input of "probability.flt", this function will
create that file, plus a text file called "probability.hdr".
"""
geodict = grid2d.getGeoDict().asDict()
array = grid2d.getData().astype('float32')
np.save(filename, array)
npyfilename = filename + '.npy'
os.rename(npyfilename, filename)
fpath, fname = os.path.split(filename)
fbase, _ = os.path.splitext(fname)
hdrfile = os.path.join(fpath, fbase + '.hdr')
f = open(hdrfile, 'wt')
for key, value in geodict.items():
if isinstance(value, int):
fmt = '%s = %i\n'
elif isinstance(value, float):
fmt = '%s = %.4f\n'
else:
fmt = '%s = %s\n'
f.write(fmt % (key, value))
f.close()
def savelayers(grids, filename):
"""
Save ground failure layers object as a MultiHazard HDF file, preserving
metadata structures. All layers must have same geodictionary.
Args:
grids: Ground failure layers object.
filename (str): Path to where you want to save this file.
Returns:
.hdf5 file containing ground failure layers
"""
layers = collections.OrderedDict()
metadata = collections.OrderedDict()
for key in list(grids.keys()):
layers[key] = grids[key]['grid'].getData()
metadata[key] = {
'description': grids[key]['description'],
'type': grids[key]['type'],
'label': grids[key]['label']
}
origin = {}
header = {}
mgrid = MultiHazardGrid(layers, grids[key]['grid'].getGeoDict(),
origin,
header,
metadata=metadata)
mgrid.save(filename)
def loadlayers(filename):
"""
Load a MultiHazard HDF file back in as a ground failure layers object in
active memory (must have been saved for this purpose).
Args:
filename (str): Path to layers file (hdf5 extension).
Returns:
Ground failure layers object
"""
mgrid = MultiHazardGrid.load(filename)
grids = collections.OrderedDict()
for key in mgrid.getLayerNames():
grids[key] = {
'grid': mgrid.getData()[key],
'description': mgrid.getMetadata()[key]['description'],
'type': mgrid.getMetadata()[key]['type'],
'label': mgrid.getMetadata()[key]['label']
}
return grids
def get_alert(paramalertLS, paramalertLQ, parampopLS, parampopLQ,
hazbinLS=[1., 10., 100.], popbinLS=[100, 1000, 10000],
hazbinLQ=[10., 100., 1000.], popbinLQ=[1000, 10000, 100000]):
"""
Get alert levels
Args:
paramalertLS (float): Hazard statistic of preferred landslide model
paramalertLQ (float): Hazard statistic of preferred liquefaction model
parampopLS (float): Exposure statistic of preferred landslide model
parampopLQ (float): Exposure statistic of preferred liquefaction model
hazbinLS (list): 3 element list of bin edges for landslide
hazard alert between Green and Yellow, Yellow and Orange, and
Orange and Red.
popbinLS (list): same as above but for population exposure
hazbinLQ (list): 3 element list of bin edges for liquefaction hazard
alert between Green and Yellow, Yellow and Orange, and Orange
and Red.
popbinLQ (list): same as above but for population exposure
    Returns:
tuple: (hazLS, popLS, hazLQ, popLQ, LS, LQ) where:
* hazLS: the landslide hazard alert level (str)
* popLS: the landslide population alert level (str)
* hazLQ: the liquefaction hazard alert level (str)
* popLQ: the liquefaction population alert level (str)
* LS: the overall landslide alert level (str)
* LQ: the overall liquefaction alert level (str)
"""
if paramalertLS is None:
hazLS = None
elif paramalertLS < hazbinLS[0]:
hazLS = 'green'
elif paramalertLS >= hazbinLS[0] and paramalertLS < hazbinLS[1]:
hazLS = 'yellow'
elif paramalertLS >= hazbinLS[1] and paramalertLS < hazbinLS[2]:
hazLS = 'orange'
    elif paramalertLS >= hazbinLS[2]:
hazLS = 'red'
else:
hazLS = None
if parampopLS is None:
popLS = None
elif parampopLS < popbinLS[0]:
popLS = 'green'
elif parampopLS >= popbinLS[0] and parampopLS < popbinLS[1]:
popLS = 'yellow'
elif parampopLS >= popbinLS[1] and parampopLS < popbinLS[2]:
popLS = 'orange'
elif parampopLS >= popbinLS[2]:
popLS = 'red'
else:
popLS = None
if paramalertLQ is None:
hazLQ = None
elif paramalertLQ < hazbinLQ[0]:
hazLQ = 'green'
elif paramalertLQ >= hazbinLQ[0] and paramalertLQ < hazbinLQ[1]:
hazLQ = 'yellow'
elif paramalertLQ >= hazbinLQ[1] and paramalertLQ < hazbinLQ[2]:
hazLQ = 'orange'
elif paramalertLQ >= hazbinLQ[2]:
hazLQ = 'red'
else:
hazLQ = None
if parampopLQ is None:
popLQ = None
elif parampopLQ < popbinLQ[0]:
popLQ = 'green'
elif parampopLQ >= popbinLQ[0] and parampopLQ < popbinLQ[1]:
popLQ = 'yellow'
elif parampopLQ >= popbinLQ[1] and parampopLQ < popbinLQ[2]:
popLQ = 'orange'
elif parampopLQ >= popbinLQ[2]:
popLQ = 'red'
else:
popLQ = None
num2color = {
'1': 'green',
'2': 'yellow',
'3': 'orange',
'4': 'red'
}
col2num = dict((v, k) for k, v in num2color.items())
if popLS is not None and hazLS is not None:
LSnum1 = col2num[hazLS]
LSnum2 = col2num[popLS]
LSnum = str(np.max([int(LSnum1), int(LSnum2)]))
LS = num2color[LSnum]
else:
LS = None
if popLQ is not None and hazLQ is not None:
LQnum1 = col2num[hazLQ]
LQnum2 = col2num[popLQ]
LQnum = str(np.max([int(LQnum1), int(LQnum2)]))
LQ = num2color[LQnum]
else:
LQ = None
return hazLS, popLS, hazLQ, popLQ, LS, LQ
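# Example (values chosen to fall in the yellow bins defined above):
#
#   get_alert(5., 50., 500, 5000)
#   # -> ('yellow', 'yellow', 'yellow', 'yellow', 'yellow', 'yellow')
#   #    i.e. (hazLS, popLS, hazLQ, popLQ, LS, LQ)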
def view_database(database, starttime=None, endtime=None,
minmag=None, maxmag=None, eventids=None,
realtime=False, currentonly=False, numevents=None,
LShazmin=None, LShazmax=None, LSpopmin=None,
LSpopmax=None, LQhazmin=None, LQhazmax=None,
LQpopmin=None, LQpopmax=None, verbose=False,
printcols=None, csvfile=None, printsummary=True,
printsuccess=False, printfailed=False,
printnotmet=False, maxcolwidth=100,
alertreport='value', realtime_maxsec=259200.):
"""
Prints out information from the ground failure database based on
search criteria and other options. If nothing is defined except the
database, it will print out a summary and details on the successful
event runs only.
Args:
database (str): file path to event database (.db file)
starttime (str): earliest earthquake time to include in the search,
can be any string date recognizable by np.datetime
endtime (str): latest earthquake time to include in the search,
can be any string date recognizable by datetime
minmag (float): minimum magnitude to include in search
maxmag (float): maximum magnitude to include in search
eventids (list): list of specific event ids to include (optional)
realtime (bool): if True, will only include events that were run in
near real time (defined by delay time less than realtime_maxsec)
currentonly (bool): if True, will only include the most recent run
of each event
numevents (int): Include the numevents most recent events that meet
search criteria
LShazmin: minimum landslide hazard alert color ('green', 'yellow',
'orange', 'red') or minimum hazard alert statistic value
LShazmax: same as above but for maximum landslide hazard alert
value/color
LSpopmin: same as above but for minimum landslide population alert
value/color
LSpopmax: same as above but for maximum landslide population alert
value/color
LQhazmin: same as above but for minimum liquefaction hazard alert
value/color
LQhazmax: same as above but for maximum liquefaction hazard alert
value/color
LQpopmin: same as above but for minimum liquefaction population alert
value/color
LQpopmax: same as above but for maximum liquefaction population alert
value/color
verbose (bool): if True, will print all columns (overridden if
printcols is assigned)
printcols (list): List of columns to print out (choose from id,
eventcode, shakemap_version, note, version, lat, lon, depth,
time, mag, location, starttime, endtime, eventdir,
finitefault, HaggLS, ExpPopLS, HaggLQ, ExpPopLQ
csvfile: If defined, saves csvfile of table of all events found
(includes all fields and failed/non-runs)
printsummary (bool): if True (default), will print summary of events
found to screen
        printsuccess (bool): if True (default False), will print out database
            entries for successful event runs found
printfailed (bool): if True (default False), will print out information
about failed event runs
printnotmet (bool): if True (default False), will print out information
about event runs that didn't meet criteria to run ground failure
maxcolwidth (int): maximum column width for printouts of database
entries.
alertreport (str): 'value' if values of alert statistics should be
printed, or 'color' if alert level colors should be printed
realtime_maxsec (float): if realtime is True, this is the maximum delay
between event time and processing end time in seconds
to consider an event to be run in realtime
Returns:
Prints summaries and database info to screen as requested, saves a
csv file if requested. Also returns a tuple where (success, fail,
notmet, stats, criteria) where
* success: pandas dataframe of selected events that ran
successfully
* fail: pandas dataframe of selected events that failed to run
due to an error
* notmet: pandas dataframe of selected events that failed to run
because they did not meet the criteria to run ground failure
* stats: dictionary containing statistics summarizing selected
events where
* aLSg/y/o/r is the number of overall alerts of green/yellow
orange or red for landslides. If LS is replaced with LQ,
it is the same but for liquefaction.
* hazLSg/y/o/r same as above but for hazard alert level totals
* popLSg/y/o/r same as above but for population alert level
totals
* nsuccess is the number of events that ran successfully
* nfail is the number of events that failed to run
* nnotmet is the number of events that didn't run because they
did not meet criteria to run ground failure
* nunique is the number of unique earthquake events run
* nunique_success is the number of unique earthquake events
that ran successfully
* nrealtime is the number of events that ran in near-real-time
* delay_median_s is the median delay time for near-real-time
events (earthquake time until first GF run), also the same
for mean, min, max, and standard deviation
* criteria: dictionary containing info on what criteria were used
for the search
"""
import warnings
warnings.filterwarnings("ignore")
formatters = {"time": "{:%Y-%m-%d}".format,
"shakemap_version": "{:.0f}".format,
"version": "{:.0f}".format,
"starttime": "{:%Y-%m-%d %H:%M}".format,
"endtime": "{:%Y-%m-%d %H:%M}".format}
criteria = dict(locals())
# Define alert bins for later use
hazbinLS = dict(green=[0., 1], yellow=[1., 10.], orange=[10., 100.],
red=[100., 1e20])
popbinLS = dict(green=[0., 100], yellow=[100., 1000.],
orange=[1000., 10000.], red=[10000., 1e20])
hazbinLQ = dict(green=[0., 10], yellow=[10., 100.], orange=[100., 1000.],
red=[1000., 1e20])
popbinLQ = dict(green=[0., 1000], yellow=[1000., 10000.],
orange=[10000., 100000.], red=[100000., 1e20])
connection = None
connection = lite.connect(database)
pd.options.display.max_colwidth = maxcolwidth
# Read in entire shakemap table, do selection using pandas
df = pd.read_sql_query("SELECT * FROM shakemap", connection)
df['starttime'] = pd.to_datetime(df['starttime'], utc=True)
df['endtime'] = pd.to_datetime(df['endtime'], utc=True)
df['time'] = pd.to_datetime(df['time'], utc=True)
# Print currently running info to screen
print('-------------------------------------------------')
curt = df.loc[df['note'].str.contains('Currently running', na=False)]
if len(curt) > 0:
ccols = ['eventcode', 'time', 'shakemap_version', 'note', 'starttime']
ccols2 = ['eventcode', 'time', 'shake_v', 'note', 'startrun']
print('Currently running - %d runs' % len(curt))
print('-------------------------------------------------')
print(curt.to_string(columns=ccols, index=False,
justify='left', header=ccols2,
formatters=formatters))
# Remove currently running from list
df.drop(curt.index, inplace=True)
else:
print('No events currently running')
print('-------------------------------------------------')
okcols = list(df.keys())
if eventids is not None:
if not hasattr(eventids, '__len__'):
eventids = [eventids]
df = df.loc[df['eventcode'].isin(eventids)]
if minmag is not None:
df = df.loc[df['mag'] >= minmag]
if maxmag is not None:
df = df.loc[df['mag'] <= maxmag]
# Narrow down the database based on input criteria
# set default values for start and end
endt = pd.to_datetime('now', utc=True)
stt = pd.to_datetime('1700-01-01', utc=True)
if starttime is not None:
stt = pd.to_datetime(starttime, utc=True)
if endtime is not None:
endt = pd.to_datetime(endtime, utc=True)
df = df.loc[(df['time'] > stt) & (df['time'] <= endt)]
# Winnow down based on alert
# Assign numerical values if colors were used
if LShazmin is not None or LShazmax is not None:
if LShazmin is None:
LShazmin = 0.
if LShazmax is None:
LShazmax = 1e20
if isinstance(LShazmin, str):
LShazmin = hazbinLS[LShazmin][0]
if isinstance(LShazmax, str):
LShazmax = hazbinLS[LShazmax][1]
df = df.loc[(df['HaggLS'] >= LShazmin) & (df['HaggLS'] <= LShazmax)]
if LQhazmin is not None or LQhazmax is not None:
if LQhazmin is None:
LQhazmin = 0.
if LQhazmax is None:
LQhazmax = 1e20
if isinstance(LQhazmin, str):
LQhazmin = hazbinLQ[LQhazmin][0]
if isinstance(LQhazmax, str):
LQhazmax = hazbinLQ[LQhazmax][1]
df = df.loc[(df['HaggLQ'] >= LQhazmin) & (df['HaggLQ'] <= LQhazmax)]
if LSpopmin is not None or LSpopmax is not None:
if LSpopmin is None:
LSpopmin = 0.
if LSpopmax is None:
LSpopmax = 1e20
if isinstance(LSpopmin, str):
LSpopmin = popbinLS[LSpopmin][0]
if isinstance(LSpopmax, str):
LSpopmax = popbinLS[LSpopmax][1]
df = df.loc[(df['ExpPopLS'] >= LSpopmin) &
(df['ExpPopLS'] <= LSpopmax)]
if LQpopmin is not None or LQpopmax is not None:
if LQpopmin is None:
LQpopmin = 0.
if LQpopmax is None:
LQpopmax = 1e20
if isinstance(LQpopmin, str):
LQpopmin = popbinLQ[LQpopmin][0]
if isinstance(LQpopmax, str):
LQpopmax = popbinLQ[LQpopmax][1]
df = df.loc[(df['ExpPopLQ'] >= LQpopmin) &
(df['ExpPopLQ'] <= LQpopmax)]
# Figure out which were run in real time
delays = []
event_codes = df['eventcode'].values
elist, counts = np.unique(event_codes, return_counts=True)
keep = []
rejects = []
for idx in elist:
vers = df.loc[df['eventcode'] == idx]['shakemap_version'].values
if len(vers) == 0:
rejects.append(idx)
delays.append(float('nan'))
continue
sel1 = df.loc[df['eventcode'] == idx]
# vermin = np.nanmin(vers)
# sel1 = df.loc[(df['eventcode'] == idx) &
# (df['shakemap_version'] == vermin)]
if len(sel1) > 0:
dels = []
for index, se in sel1.iterrows():
dels.append(np.timedelta64(se['endtime'] - se['time'], 's').astype(int))
delay = np.nanmin(dels)
if delay <= realtime_maxsec:
keep.append(idx)
delays.append(delay)
else:
delays.append(float('nan'))
else:
rejects.append(idx)
delays.append(float('nan'))
if realtime: # Keep just realtime events
df = df.loc[df['eventcode'].isin(keep)]
# Remove any bad/incomplete entries
df = df.loc[~df['eventcode'].isin(rejects)]
# Get only latest version for each event id if requested
if currentonly:
df.insert(0, 'Current', 0)
ids = np.unique(df['eventcode'])
for id1 in ids:
# Get most recent one for each
temp = df.loc[df['eventcode'] == id1].copy()
dels2 = []
for index, te in temp.iterrows():
dels2.append(np.timedelta64(te['endtime'] - te['time'], 's').astype(int))
idx = np.argmax(dels2)
df.loc[df['endtime'] == temp.iloc[idx]['endtime'], 'Current'] = 1
df = df.loc[df['Current'] == 1]
df.drop_duplicates(inplace=True)
# Keep just the most recent number requested
if numevents is not None and numevents < len(df):
df = df.iloc[(numevents*-1):]
    # Now that we have the requested dataframe, make the outputs
success = df.loc[(df['note'] == '') |
(df['note'].str.contains('adjusted to'))]
fail = df.loc[df['note'].str.contains('fail')]
notmet = df.loc[(~df['note'].str.contains('fail')) & (df['note'] != '') &
(~df['note'].str.contains('adjusted to'))]
if len(df) == 0:
print('No matching GF runs found')
return
cols = []
if printcols is not None:
for p in printcols:
if p in okcols:
cols.append(p)
else:
print('column %s defined in printcols does not exist in the '
'database' % p)
elif verbose:
cols = okcols
else: # List of what we usually want to see
cols = ['eventcode', 'mag', 'location', 'time', 'shakemap_version',
'version', 'HaggLS', 'ExpPopLS', 'HaggLQ', 'ExpPopLQ']
# Compute overall alert stats (just final for each event)
# get unique event code list of full database
codes = df['eventcode'].values
allevids, count = np.unique(codes, return_counts=True)
nunique = len(allevids)
# get unique event code list for success
event_codes = success['eventcode'].values
elist2, count = np.unique(event_codes, return_counts=True)
nunique_success = len(elist2)
# Get delays just for these events
delays = np.array(delays)
del_set = []
for el in elist2:
del_set.append(delays[np.where(elist == el)][0])
# get final alert values for each
hazalertLS = []
hazalertLQ = []
popalertLS = []
popalertLQ = []
alertLS = []
alertLQ = []
# Currently includes just the most current one
for idx in elist2:
vers = np.nanmax(success.loc[success['eventcode'] == idx]
['shakemap_version'].values)
# endt5 = np.nanmax(success.loc[success['eventcode'] == idx]
# ['endtime'].values)
sel1 = success.loc[(success['eventcode'] == idx) &
(success['shakemap_version'] == vers)]
out = get_alert(sel1['HaggLS'].values[-1],
sel1['HaggLQ'].values[-1],
sel1['ExpPopLS'].values[-1],
sel1['ExpPopLQ'].values[-1])
hazLS, popLS, hazLQ, popLQ, LS, LQ = out
hazalertLS.append(hazLS)
hazalertLQ.append(hazLQ)
popalertLS.append(popLS)
popalertLQ.append(popLQ)
alertLS.append(LS)
alertLQ.append(LQ)
origsuc = success.copy() # Keep copy
# Convert all values to alert colors
for index, row in success.iterrows():
for k, bins in hazbinLS.items():
if row['HaggLS'] >= bins[0] and row['HaggLS'] < bins[1]:
success.loc[index, 'HaggLS'] = k
for k, bins in hazbinLQ.items():
if row['HaggLQ'] >= bins[0] and row['HaggLQ'] < bins[1]:
success.loc[index, 'HaggLQ'] = k
for k, bins in popbinLS.items():
if row['ExpPopLS'] >= bins[0] and row['ExpPopLS'] < bins[1]:
success.loc[index, 'ExpPopLS'] = k
for k, bins in popbinLQ.items():
if row['ExpPopLQ'] >= bins[0] and row['ExpPopLQ'] < bins[1]:
success.loc[index, 'ExpPopLQ'] = k
# Compile stats
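    # Key naming for the alert counts below: a* = overall alert,
    # haz* = hazard alert, pop* = population alert; LS = landslide,
    # LQ = liquefaction; trailing g/y/o/r = green/yellow/orange/red.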
stats = dict(aLSg=len([a for a in alertLS if a == 'green']),
aLSy=len([a for a in alertLS if a == 'yellow']),
aLSo=len([a for a in alertLS if a == 'orange']),
aLSr=len([a for a in alertLS if a == 'red']),
hazLSg=len([a for a in hazalertLS if a == 'green']),
hazLSy=len([a for a in hazalertLS if a == 'yellow']),
hazLSo=len([a for a in hazalertLS if a == 'orange']),
hazLSr=len([a for a in hazalertLS if a == 'red']),
popLSg=len([a for a in popalertLS if a == 'green']),
popLSy=len([a for a in popalertLS if a == 'yellow']),
popLSo=len([a for a in popalertLS if a == 'orange']),
popLSr=len([a for a in popalertLS if a == 'red']),
aLQg=len([a for a in alertLQ if a == 'green']),
aLQy=len([a for a in alertLQ if a == 'yellow']),
aLQo=len([a for a in alertLQ if a == 'orange']),
aLQr=len([a for a in alertLQ if a == 'red']),
hazLQg=len([a for a in hazalertLQ if a == 'green']),
hazLQy=len([a for a in hazalertLQ if a == 'yellow']),
hazLQo=len([a for a in hazalertLQ if a == 'orange']),
hazLQr=len([a for a in hazalertLQ if a == 'red']),
popLQg=len([a for a in popalertLQ if a == 'green']),
popLQy=len([a for a in popalertLQ if a == 'yellow']),
popLQo=len([a for a in popalertLQ if a == 'orange']),
popLQr=len([a for a in popalertLQ if a == 'red']),
nsuccess=len(success),
nfail=len(fail),
nnotmet=len(notmet),
nruns=len(df),
nunique=nunique,
nunique_success=nunique_success,
nrealtime=np.sum(np.isfinite(del_set)),
delay_median_s=float('nan'),
delay_mean_s=float('nan'),
delay_min_s=float('nan'),
delay_max_s=float('nan'),
delay_std_s=float('nan')
)
if np.sum(np.isfinite(del_set)) > 0:
stats['delay_median_s'] = np.nanmedian(del_set)
stats['delay_mean_s'] = np.nanmean(del_set)
stats['delay_min_s'] = np.nanmin(del_set)
stats['delay_max_s'] = np.nanmax(del_set)
stats['delay_std_s'] = np.nanstd(del_set)
# If date range not set by user
if starttime is None:
starttime = np.min(df['starttime'].values)
if endtime is None:
endtime = np.max(df['endtime'].values)
# Now output selections in text, csv files, figures
if csvfile is not None:
name, ext = os.path.splitext(csvfile)
if ext == '':
csvfile = '%s.csv' % name
if os.path.dirname(csvfile) == '':
csvfile = os.path.join(os.getcwd(), csvfile)
# make sure it's a path
if os.path.isdir(os.path.dirname(csvfile)):
df.to_csv(csvfile)
else:
raise Exception('Cannot save csv file to %s' % csvfile)
# Print to screen
if stats['nsuccess'] > 0 and printsuccess:
print('Successful - %d runs' % stats['nsuccess'])
print('-------------------------------------------------')
cols2 = np.array(cols).copy()
cols2[cols2 == 'shakemap_version'] = 'shake_v' # to save room printing
cols2[cols2 == 'version'] = 'gf_v' # to save room printing
cols2[cols2 == 'time'] = 'date' # to save room printing
if alertreport == 'color':
print(success.to_string(columns=cols, index=False, justify='left',
header=list(cols2),
formatters=formatters))
else:
print(origsuc.to_string(columns=cols, index=False, justify='left',
header=list(cols2),
formatters=formatters))
print('-------------------------------------------------')
if printfailed:
if stats['nfail'] > 0:
failcols = ['eventcode', 'location', 'mag', 'time',
'shakemap_version', 'note', 'starttime', 'endtime']
failcols2 = ['eventcode', 'location', 'mag', 'time',
'shake_v', 'note', 'startrun', 'endrun']
# if 'note' not in cols:
# cols.append('note')
print('Failed - %d runs' % stats['nfail'])
print('-------------------------------------------------')
print(fail.to_string(columns=failcols, index=False,
formatters=formatters, justify='left', header=failcols2))
else:
print('No failed runs found')
print('-------------------------------------------------')
if printnotmet:
if stats['nnotmet'] > 0:
failcols = ['eventcode', 'location', 'mag', 'time',
'shakemap_version', 'note', 'starttime', 'endtime']
failcols2 = ['eventcode', 'location', 'mag', 'time',
'shake_v', 'note', 'startrun', 'endrun']
print('Criteria not met - %d runs' % stats['nnotmet'])
print('-------------------------------------------------')
print(notmet.to_string(columns=failcols, index=False,
justify='left', header=failcols2,
formatters=formatters))
else:
print('No runs failed to meet criteria')
print('-------------------------------------------------')
if printsummary:
print('Summary %s to %s' % (str(starttime)[:10], str(endtime)[:10]))
print('-------------------------------------------------')
print('Of total of %d events run (%d unique)' % (stats['nruns'],
stats['nunique']))
print('\tSuccessful: %d (%d unique)' % (stats['nsuccess'],
stats['nunique_success']))
print('\tFailed: %d' % stats['nfail'])
print('\tCriteria not met: %d' % stats['nnotmet'])
print('\tRealtime: %d' % stats['nrealtime'])
print('\tMedian realtime delay: %1.1f mins' %
(stats['delay_median_s']/60.,))
print('-------------------------------------------------')
print('Landslide overall alerts')
print('-------------------------------------------------')
print('Green: %d' % stats['aLSg'])
print('Yellow: %d' % stats['aLSy'])
print('Orange: %d' % stats['aLSo'])
print('Red: %d' % stats['aLSr'])
print('-------------------------------------------------')
print('Liquefaction overall alerts')
print('-------------------------------------------------')
print('Green: %d' % stats['aLQg'])
print('Yellow: %d' % stats['aLQy'])
print('Orange: %d' % stats['aLQo'])
print('Red: %d' % stats['aLQr'])
print('-------------------------------------------------')
print('Landslide hazard alerts')
print('-------------------------------------------------')
print('Green: %d' % stats['hazLSg'])
print('Yellow: %d' % stats['hazLSy'])
print('Orange: %d' % stats['hazLSo'])
print('Red: %d' % stats['hazLSr'])
print('-------------------------------------------------')
print('Landslide population alerts')
print('-------------------------------------------------')
print('Green: %d' % stats['popLSg'])
print('Yellow: %d' % stats['popLSy'])
print('Orange: %d' % stats['popLSo'])
print('Red: %d' % stats['popLSr'])
print('-------------------------------------------------')
print('Liquefaction hazard alerts')
print('-------------------------------------------------')
print('Green: %d' % stats['hazLQg'])
print('Yellow: %d' % stats['hazLQy'])
print('Orange: %d' % stats['hazLQo'])
print('Red: %d' % stats['hazLQr'])
print('-------------------------------------------------')
print('Liquefaction population alerts')
print('-------------------------------------------------')
print('Green: %d' % stats['popLQg'])
print('Yellow: %d' % stats['popLQy'])
print('Orange: %d' % stats['popLQo'])
print('Red: %d' % stats['popLQr'])
print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
if alertreport == 'value':
return origsuc, fail, notmet, stats, criteria
else:
return success, fail, notmet, stats, criteria
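# Illustrative call of view_database (a sketch only; 'events.db' and the date
# range are hypothetical placeholders):
#     success, fail, notmet, stats, criteria = view_database(
#         'events.db', starttime='2018-01-01', endtime='2018-12-31',
#         minmag=6.0, realtime=True, currentonly=True, alertreport='color')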
def alert_summary(database, starttime=None, endtime=None,
minmag=None, maxmag=None, realtime=True, currentonly=True,
filebasename=None,
summarytypes='all'):
"""
    Make summary bar plots of the alerts that have been issued for the set of
    events that meet the defined criteria
Args:
database (str): file path to event database (.db file)
        starttime (str): earliest earthquake time to include in the search,
            can be any date string recognizable by pd.to_datetime
        endtime (str): latest earthquake time to include in the search,
            can be any date string recognizable by pd.to_datetime
minmag (float): minimum magnitude to include in search
maxmag (float): maximum magnitude to include in search
realtime (bool): if True, will only include events that were run in
near real time (defined by delay time less than realtime_maxsec)
currentonly (bool): if True, will only include the most recent run
of each event
        filebasename (str): If defined, will save a file with a modified
            version of this name depending on which alert is displayed; if no
            path is given it will save in the current directory.
summarytypes (str): if 'all', will create three figures, one for
overall alerts, one for hazard alerts, and one for population
alerts. If 'overall', 'hazard', or 'population' it will create
just the one selected.
Returns:
List of figure handles in order ['overall', 'hazard', 'population']
Figure files of alert level summaries
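    Example:
        An illustrative sketch only; 'events.db' and the output file name are
        hypothetical placeholders.
        >>> figs = alert_summary('events.db', minmag=6.0, summarytypes='all',
        ...                      filebasename='alert_summary.png')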
"""
out = view_database(database, starttime=starttime, endtime=endtime,
minmag=minmag, maxmag=maxmag, realtime=realtime,
currentonly=currentonly, printsummary=False,
printsuccess=False, alertreport='color')
stats = out[3]
statsLS = []
statsLQ = []
types = []
if summarytypes == 'overall' or summarytypes == 'all':
statsLS.append([stats['aLSg'], stats['aLSy'], stats['aLSo'],
stats['aLSr']])
statsLQ.append([stats['aLQg'], stats['aLQy'], stats['aLQo'],
stats['aLQr']])
types.append('overall')
if summarytypes == 'hazard' or summarytypes == 'all':
statsLS.append([stats['hazLSg'], stats['hazLSy'], stats['hazLSo'],
stats['hazLSr']])
statsLQ.append([stats['hazLQg'], stats['hazLQy'], stats['hazLQo'],
stats['hazLQr']])
types.append('hazard')
if summarytypes == 'population' or summarytypes == 'all':
statsLS.append([stats['popLSg'], stats['popLSy'], stats['popLSo'],
stats['popLSr']])
statsLQ.append([stats['popLQg'], stats['popLQy'], stats['popLQo'],
stats['popLQr']])
types.append('population')
figs = []
for sLS, sLQ, typ in zip(statsLS, statsLQ, types):
fig, ax = plt.subplots()
index = np.arange(4)
bar_width = 0.35
fontsize = 12
rects1 = ax.bar(index, sLS, bar_width,
alpha=0.3,
color='g',
label='Landslide Alerts')
rects2 = ax.bar(index + bar_width, sLQ, bar_width,
alpha=0.7,
color='g',
label='Liquefaction Alerts')
colors = ['g', 'y', 'orange', 'r']
for r1, r2, c in zip(rects1, rects2, colors):
if c == 'g':
val = 1.00
else:
val = 1.05
r1.set_color(c)
r1.set_hatch('.')
height = r1.get_height()
ax.text(r1.get_x() + r1.get_width()/2., val*height,
'%d' % int(height),
ha='center', va='bottom', color=c,
size=fontsize-2)
r2.set_color(c)
r2.set_hatch('/')
height = r2.get_height()
ax.text(r2.get_x() + r2.get_width()/2., val*height,
'%d' % int(height),
ha='center', va='bottom', color=c,
size=fontsize-2)
ax.set_xlabel('Alert', fontsize=fontsize)
ax.set_ylabel('Total Events', fontsize=fontsize)
ax.legend(fontsize=fontsize)
plt.title('Ground failure %s alerts' % typ, fontsize=fontsize)
plt.xticks(index + bar_width/2, ('Green', 'Yellow', 'Orange', 'Red'),
fontsize=fontsize-2)
plt.yticks(fontsize=fontsize-2)
plt.show()
if filebasename is not None:
name, ext = os.path.splitext(filebasename)
if ext == '':
ext = '.png'
fig.savefig('%s_%s%s' % (name, typ, ext), bbox_inches='tight')
figs.append(fig)
return figs
def plot_evolution(database, starttime=None, endtime=None,
minmag=None, maxmag=None, eventids=None,
filebasename=None, changeonly=True,
percrange=None):
"""
Make a plot and print stats showing delay times and changes in alert
statistics over time
Args:
database (str): file path to event database (.db file)
        starttime (str): earliest earthquake time to include in the search,
            can be any date string recognizable by pd.to_datetime
        endtime (str): latest earthquake time to include in the search,
            can be any date string recognizable by pd.to_datetime
minmag (float): minimum magnitude to include in search
maxmag (float): maximum magnitude to include in search
eventids (list): list of specific event ids to include (optional)
        filebasename (str): If defined, will save a file with a modified
            version of this name depending on which alert is displayed; if no
            path is given it will save in the current directory.
        changeonly (bool): if True, will only show events that changed alert
            level at least once in the time evolution plots (unless eventids
            are defined, in which case all will show)
percrange: percentile to use for error bars to show uncertainty
as value <1 (e.g., 0.95). If None, errors will not be shown
Returns:
Figures showing alert changes over time and delay and alert change
statistics
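    Example:
        An illustrative sketch only; the database path and event id are
        hypothetical placeholders.
        >>> plot_evolution('events.db', eventids=['us1000abcd'],
        ...                percrange=0.95, filebasename='evolution.png')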
"""
fontsize = 10
out = view_database(database, starttime=starttime, endtime=endtime,
minmag=minmag, maxmag=maxmag, realtime=True,
currentonly=False, printsummary=False,
printsuccess=False, alertreport='value',
eventids=eventids)
if out is None:
raise Exception('No events found that meet criteria')
if eventids is not None:
changeonly = False
success = out[0].sort_values('starttime')
elist = np.unique(success['eventcode'].values)
HaggLS = []
HaggLQ = []
ExpPopLS = []
ExpPopLQ = []
eventtime = []
times = []
alertLS = []
alertLQ = []
descrip = []
rangeHLS = []
rangeHLQ = []
rangeELS = []
rangeELQ = []
for idx in elist:
sel1 = success.loc[success['eventcode'] == idx]
hls = sel1['HaggLS'].values
hlq = sel1['HaggLQ'].values
pls = sel1['ExpPopLS'].values
plq = sel1['ExpPopLQ'].values
als = []
alq = []
for s, q, ps, pq in zip(hls, hlq, pls, plq):
_, _, _, _, als1, alq1 = get_alert(s, q, ps, pq)
als.append(als1)
alq.append(alq1)
alertLS.append(als)
alertLQ.append(alq)
HaggLS.append(hls)
HaggLQ.append(hlq)
ExpPopLS.append(pls)
ExpPopLQ.append(plq)
times.append(sel1['endtime'].values)
eventtime.append(sel1['time'].values[-1])
temp = success.loc[success['eventcode'] == idx]
date = str(temp['time'].values[-1]).split('T')[0]
descrip.append('M%1.1f %s (%s)' % (temp['mag'].values[-1],
temp['location'].values[-1].title(), date))
if percrange is not None:
if percrange > 1 or percrange < 0.:
                raise Exception('percrange must be between 0 and 1')
# Get range for input percentile
# range for H
range1 = get_rangebeta(sel1['PH_LS'], sel1['QH_LS'],
prob=percrange, maxlim=sel1['HlimLS'])
temp1 = [hls-range1[0], range1[1]-hls]
temp1[0][temp1[0] < 0.] = 0 # zero out any negative values
rangeHLS.append(temp1)
range2 = get_rangebeta(sel1['PH_LQ'], sel1['QH_LQ'],
prob=percrange, maxlim=sel1['HlimLQ'])
temp2 = [hlq-range2[0], range2[1]-hlq]
temp2[0][temp2[0] < 0.] = 0 # zero out any negative values
rangeHLQ.append(temp2)
# range for E
# range for H
range3 = get_rangebeta(sel1['PE_LS'], sel1['QE_LS'],
prob=percrange, maxlim=sel1['ElimLS'])
temp3 = [pls-range3[0], range3[1]-pls]
temp3[0][temp3[0] < 0.] = 0
rangeELS.append(temp3)
range4 = get_rangebeta(sel1['PE_LQ'], sel1['QE_LQ'],
prob=percrange, maxlim=sel1['ElimLQ'])
temp4 = [plq-range4[0], range4[1]-plq]
temp4[0][temp4[0] < 0.] = 0 # zero out any negative values
rangeELQ.append(temp4)
else:
nanmat = np.empty((2, len(sel1)))
nanmat[:] = np.NaN
rangeHLS.append(nanmat)
rangeHLQ.append(nanmat)
rangeELS.append(nanmat)
rangeELQ.append(nanmat)
# Plot of changes over time to each alert level
fig1, axes = plt.subplots(2, 1) # , figsize=(10, 10))
ax1, ax2 = axes
ax1.set_title('Landslide Summary Statistics', fontsize=fontsize)
ax1.set_ylabel(r'Area Exposed to Hazard ($km^2$)', fontsize=fontsize)
ax2.set_ylabel('Population Exposure', fontsize=fontsize)
fig2, axes = plt.subplots(2, 1) # , figsize=(10, 10))
ax3, ax4 = axes
ax3.set_title('Liquefaction Summary Statistics', fontsize=fontsize)
ax3.set_ylabel(r'Area Exposed to Hazard ($km^2$)', fontsize=fontsize)
ax4.set_ylabel('Population Exposure', fontsize=fontsize)
ax2.set_xlabel('Hours after earthquake', fontsize=fontsize)
ax4.set_xlabel('Hours after earthquake', fontsize=fontsize)
lqplot = 0
lsplot = 0
lsch = 0
lqch = 0
mindel = []
zipped = zip(HaggLS, HaggLQ, ExpPopLS, ExpPopLQ, alertLS, alertLQ,
descrip, times, eventtime)
i = 0
for hls, hlq, pls, plq, als, alq, des, t, et in zipped:
resS = np.unique(als)
resL = np.unique(alq)
delays = [np.timedelta64(t1 - et, 's').astype(float) for t1 in t]
mindel.append(np.min(delays))
# Set to lower edge of green bin if zero so ratios will show up
hls = np.array(hls)
hls[hls == 0.] = lshbins[0]
hlq = np.array(hlq)
hlq[hlq == 0.] = lqhbins[0]
pls = np.array(pls)
pls[pls == 0.] = lspbins[0]
plq = np.array(plq)
plq[plq == 0.] = lqpbins[0]
if (len(resS) > 1 or 'green' not in resS) or\
(len(resL) > 1 or 'green' not in resL):
if len(resS) > 1 or not changeonly:
if percrange is not None:
ax1.errorbar(np.array(delays)/3600., hls,
yerr=rangeHLS[i], label=des)
ax2.errorbar(np.array(delays)/3600., pls, yerr=rangeELS[i])
ax1.set_xscale("log", nonposx='clip')
ax1.set_yscale("log", nonposy='clip')
ax2.set_xscale("log", nonposx='clip')
ax2.set_yscale("log", nonposy='clip')
else:
ax1.loglog(np.array(delays)/3600., hls, '.-', label=des)
ax2.loglog(np.array(delays)/3600., pls, '.-')
ax1.set_ylim([lshbins[0], np.max((lshbins[-1], np.max(hls)))])
ax2.set_ylim([lspbins[0], np.max((lspbins[-1], np.max(pls)))])
if changeonly:
lsch += 1
lsplot += 1
if len(resL) > 1 or not changeonly:
if percrange is not None:
ax3.errorbar(np.array(delays)/3600., hlq, yerr=rangeHLQ[i],
label=des)
ax4.errorbar(np.array(delays)/3600., plq, yerr=rangeELQ[i])
ax3.set_xscale("log", nonposx='clip')
ax3.set_yscale("log", nonposy='clip')
ax4.set_xscale("log", nonposx='clip')
ax4.set_yscale("log", nonposy='clip')
else:
ax3.loglog(np.array(delays)/3600., hlq, '.-', label=des)
ax4.loglog(np.array(delays)/3600., plq, '.-')
ax3.set_ylim([lqhbins[0], np.max((lqhbins[-1], np.max(hlq)))])
ax4.set_ylim([lqpbins[0], np.max((lqpbins[-1], np.max(plq)))])
if changeonly:
lqch += 1
lqplot += 1
i += 1
print('%d of %d events had a liquefaction overall alert that changed' %
(lqch, len(elist)))
print('%d of %d events had a landslide overall alert that changed' %
(lsch, len(elist)))
if lsplot < 5:
ax1.legend(fontsize=fontsize-3)
if lqplot < 5:
ax3.legend(fontsize=fontsize-3)
ax1.tick_params(labelsize=fontsize-2)
ax2.tick_params(labelsize=fontsize-2)
ax3.tick_params(labelsize=fontsize-2)
ax4.tick_params(labelsize=fontsize-2)
ax1.grid(True)
ax2.grid(True)
ax3.grid(True)
ax4.grid(True)
alert_rectangles(ax1, lshbins)
alert_rectangles(ax2, lspbins)
alert_rectangles(ax3, lqhbins)
alert_rectangles(ax4, lqpbins)
if filebasename is not None:
name, ext = os.path.splitext(filebasename)
if ext == '':
ext = '.png'
fig1.savefig('%s_LSalert_evolution%s' % (name, ext),
bbox_inches='tight')
fig2.savefig('%s_LQalert_evolution%s' % (name, ext),
bbox_inches='tight')
def time_delays(database, starttime=None, endtime=None,
minmag=None, maxmag=None, eventids=None,
filebasename=None):
"""
Make a plot and print stats showing delay times and changes in alert
statistics over time
Args:
database (str): file path to event database (.db file)
        starttime (str): earliest earthquake time to include in the search,
            can be any date string recognizable by pd.to_datetime
        endtime (str): latest earthquake time to include in the search,
            can be any date string recognizable by pd.to_datetime
minmag (float): minimum magnitude to include in search
maxmag (float): maximum magnitude to include in search
eventids (list): list of specific event ids to include (optional)
        filebasename (str): If defined, will save a file with a modified
            version of this name depending on which alert is displayed; if no
            path is given it will save in the current directory.
Returns:
Figure showing delay and alert change statistics
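    Example:
        An illustrative sketch only; the database path is a hypothetical
        placeholder.
        >>> time_delays('events.db', starttime='2018-01-01', minmag=6.0,
        ...             filebasename='delays.png')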
"""
out = view_database(database, starttime=starttime, endtime=endtime,
minmag=minmag, maxmag=maxmag, realtime=True,
currentonly=False, printsummary=False,
printsuccess=False, alertreport='value',
eventids=eventids)
success = out[0]
elist = np.unique(success['eventcode'].values)
HaggLS = []
HaggLQ = []
ExpPopLS = []
ExpPopLQ = []
eventtime = []
times = []
alertLS = []
alertLQ = []
descrip = []
for idx in elist:
sel1 = success.loc[success['eventcode'] == idx]
hls = sel1['HaggLS'].values
hlq = sel1['HaggLQ'].values
pls = sel1['ExpPopLS'].values
plq = sel1['ExpPopLQ'].values
als = []
alq = []
for s, q, ps, pq in zip(hls, hlq, pls, plq):
_, _, _, _, als1, alq1 = get_alert(s, q, ps, pq)
als.append(als1)
alq.append(alq1)
alertLS.append(als)
alertLQ.append(alq)
HaggLS.append(hls)
HaggLQ.append(hlq)
ExpPopLS.append(pls)
ExpPopLQ.append(plq)
times.append(sel1['endtime'].values)
eventtime.append(sel1['time'].values[-1])
temp = success.loc[success['eventcode'] == idx]
date = str(temp['time'].values[-1]).split('T')[0]
descrip.append('M%1.1f %s (%s)' % (temp['mag'].values[-1],
temp['location'].values[-1].title(), date))
mindel = []
delstableLS = []
delstableLQ = []
ratHaggLS = []
ratHaggLQ = []
ratPopLS = []
ratPopLQ = []
zipped = zip(HaggLS, HaggLQ, ExpPopLS, ExpPopLQ, alertLS, alertLQ,
descrip, elist, times, eventtime)
for hls, hlq, pls, plq, als, alq, des, el, t, et in zipped:
delays = [np.timedelta64(t1 - et, 's').astype(float) for t1 in t]
mindel.append(np.min(delays))
delstableLS.append(delays[np.min(np.where(np.array(als) == als[-1]))])
delstableLQ.append(delays[np.min(np.where(np.array(alq) == alq[-1]))])
# Set to lower edge of green bin if zero so ratios will show up
hls = np.array(hls)
hls[hls == 0.] = 0.1
ratHaggLS.append(hls[-1]/hls[0])
hlq = np.array(hlq)
hlq[hlq == 0.] = 1.
ratHaggLQ.append(hlq[-1]/hlq[0])
pls = np.array(pls)
pls[pls == 0.] = 10.
ratPopLS.append(pls[-1]/pls[0])
plq = np.array(plq)
plq[plq == 0.] = 100.
ratPopLQ.append(plq[-1]/plq[0])
# Don't bother making this plot when eventids are specified
if eventids is None or len(eventids) > 25:
# Histograms of delay times etc.
fig, axes = plt.subplots(2, 2, figsize=(10, 10), sharey='col')
ax1 = axes[0, 0]
bins = np.logspace(np.log10(0.1), np.log10(1000.), 15)
ax1.hist(np.array(mindel)/3600., color='k', edgecolor='k', alpha=0.5,
bins=bins)
ax1.set_xscale("log")
ax1.set_xlabel('Time delay to first run (hours)')
ax1.set_ylabel('Number of events')
vals = (np.nanmean(mindel)/3600., np.nanmedian(mindel)/3600.,
np.nanstd(mindel)/3600.)
# ax1.text(0.8, 0.8, 'mean: %1.1f hr\nmedian: %1.1f hr\nstd: %1.1f hr' %
# vals, transform=ax1.transAxes, ha='center', va='center')
ax1.text(0.8, 0.8, 'median: %1.1f hr' %
vals[1], transform=ax1.transAxes, ha='center', va='center')
delstableLS = np.array(delstableLS)
delstableLQ = np.array(delstableLQ)
delstable = np.max([delstableLS, delstableLQ], axis=0)
ax2 = axes[1, 0]
ax2.hist(np.array(delstable)/3600., color='k', edgecolor='k',
alpha=0.5, bins=bins)
ax2.set_xscale("log")
ax2.set_xlabel('Time delay till final alert color reached (hours)')
ax2.set_ylabel('Number of events')
vals = (np.nanmean(delstable)/3600., np.nanmedian(delstable)/3600.,
np.nanstd(delstable)/3600.)
# ax2.text(0.8, 0.8, 'mean: %1.1f hr\nmedian: %1.1f hr\nstd: %1.1f hr' %
# vals, transform=ax2.transAxes, ha='center', va='center')
ax2.text(0.8, 0.8, 'median: %1.1f hr' %
vals[1], transform=ax2.transAxes, ha='center', va='center')
        print('Liquefaction overall alerts that changed stabilized after a '
              'median of %1.2f hours' %
              (np.median(delstableLQ[delstableLQ > 0.])/3600.))
        print('Landslide overall alerts that changed stabilized after a '
              'median of %1.2f hours' %
              (np.median(delstableLS[delstableLS > 0.])/3600.))
ratHaggLS = np.array(ratHaggLS)
ratHaggLQ = np.array(ratHaggLQ)
ax3 = axes[0, 1]
bins = np.logspace(np.log10(0.01), np.log10(100.), 9)
ax3.hist(ratHaggLS[ratHaggLS != 1.], hatch='.', edgecolor='k',
alpha=0.5, fill=False, label='Landslides',
bins=bins)
ax3.hist(ratHaggLQ[ratHaggLQ != 1.], hatch='/', edgecolor='k',
alpha=0.5, fill=False, label='Liquefaction',
bins=bins)
ax3.set_xscale("log")
# ax3.set_xlabel(r'$H_{agg}$ final/$H_{agg}$ initial')
ax3.set_xlabel(r'Area Exposed to Hazard $H_{final}/H_{initial}$')
ax3.set_ylabel('Number of events')
ax3.axvline(1., lw=2, color='k')
arrowprops = dict(facecolor='black', width=1., headwidth=7.,
headlength=7.)
ax3.annotate('No change:\nLS=%d\nLQ=%d' %
(len(ratHaggLS[ratHaggLS == 1.]),
len(ratHaggLQ[ratHaggLQ == 1.])),
xy=(0.5, 0.6), xycoords='axes fraction',
textcoords='axes fraction', ha='center', va='center',
xytext=(0.3, 0.6),
arrowprops=arrowprops)
ax3.legend(handlelength=2, handleheight=3, loc='upper right')
ratPopLS = np.array(ratPopLS)
ratPopLQ = np.array(ratPopLQ)
ax4 = axes[1, 1]
bins = np.logspace(np.log10(0.01), np.log10(100.), 9)
ax4.hist(ratPopLS[ratPopLS != 1.], hatch='.', edgecolor='k',
alpha=0.5, fill=False, bins=bins)
ax4.hist(ratPopLQ[ratPopLQ != 1.], bins=bins,
hatch='/', edgecolor='k', alpha=0.5, fill=False)
ax4.set_xscale("log")
ax4.set_xlabel(r'Population Exposure $E_{final}/E_{initial}$')
ax4.set_ylabel('Number of events')
ax4.axvline(1., lw=2, color='k')
ax4.annotate('No change:\nLS=%d\nLQ=%d' %
(len(ratPopLS[ratPopLS == 1.]),
len(ratPopLQ[ratPopLQ == 1.])),
xy=(0.5, 0.75), xycoords='axes fraction',
textcoords='axes fraction', xytext=(0.3, 0.75),
arrowprops=arrowprops, ha='center', va='center')
# Add letters
ax1.text(0.02, 0.98, 'a)', transform=ax1.transAxes, ha='left',
va='top', fontsize=14)
ax2.text(0.02, 0.98, 'b)', transform=ax2.transAxes, ha='left',
va='top', fontsize=14)
ax3.text(0.02, 0.98, 'c)', transform=ax3.transAxes, ha='left',
va='top', fontsize=14)
ax4.text(0.02, 0.98, 'd)', transform=ax4.transAxes, ha='left',
va='top', fontsize=14)
plt.show()
if filebasename is not None:
name, ext = os.path.splitext(filebasename)
fig.savefig('%s_alertdelay_stats%s' % (name, ext),
bbox_inches='tight')
def plot_uncertainty(database, eventid, currentonly=True, filebasename=None,
bars=False, percrange=0.95):
"""
    Plot the uncertainty distributions of the hazard and population exposure
    statistics for a single event against the alert bins, either for the most
    recent version only or for all versions of the event
Args:
database (str): file path to event database (.db file)
eventid (str): event ids to plot
        currentonly (bool): if True, will only plot the newest version; if
            False, will plot all versions with different colors
        filebasename (str): If defined, will save a file with a modified
            version of this name depending on which alert is displayed; if no
            path is given it will save in the current directory.
bars (bool): if True, will use bars spanning percrange
percrange (float): percentile to use for error bars to show uncertainty
as value <1 (e.g., 0.95).
Returns:
Figure showing uncertainty
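    Example:
        An illustrative sketch only; the database path and event id are
        hypothetical placeholders.
        >>> fig = plot_uncertainty('events.db', 'us1000abcd', bars=True,
        ...                        percrange=0.95)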
"""
fontsize = 12
out = view_database(database, eventids=[eventid], currentonly=currentonly,
printsummary=False)
if out is None:
raise Exception('No events found that meet criteria')
success = out[0]
nvers = len(success)
# Get plots ready
fig, axes = plt.subplots(2, 2, sharey=True, figsize=(14, 5))
colors = np.flipud(np.linspace(0., 0.7, nvers))
widths = np.ones(len(colors))
# make last one thicker
widths[-1] = 2.
# Fill in plot
i = 0
offset = 0
for index, row in success.iterrows():
xvalsHLS, yvalsHLS, probsHLS = get_pdfbeta(row['PH_LS'], row['QH_LS'],
lshbins,
maxlim=row['HlimLS'])
if bars:
offset = i * 0.1
valmin, valmax = get_rangebeta(row['PH_LS'], row['QH_LS'],
prob=percrange,
maxlim=row['HlimLS'])
axes[0, 0].hlines(offset+0.1, valmin, valmax,
color=str(colors[i]), lw=2)
else:
offset = 0.
axes[0, 0].plot(xvalsHLS, yvalsHLS/np.max(yvalsHLS),
color=str(colors[i]), lw=widths[i])
axes[0, 0].plot(np.max((lshbins[0], row['HaggLS'])), offset, marker=7,
color=str(colors[i]), markersize=11)
#axes[0,0].text(row['HaggLS'], 0.13, '%1.0f' % row['version'],
# color=str(colors[i]), ha='center')
xvalsHLQ, yvalsHLQ, probsHLQ = get_pdfbeta(row['PH_LQ'], row['QH_LQ'],
lqhbins,
maxlim=row['HlimLQ'])
if bars:
valmin, valmax = get_rangebeta(row['PH_LQ'], row['QH_LQ'],
prob=percrange,
maxlim=row['HlimLQ'])
axes[0, 1].hlines(offset+0.1, valmin, valmax,
color=str(colors[i]), lw=2)
else:
axes[0, 1].plot(xvalsHLQ, yvalsHLQ/np.max(yvalsHLQ),
color=str(colors[i]), lw=widths[i])
axes[0, 1].plot(np.max((lqhbins[0], row['HaggLQ'])), offset, marker=7,
color=str(colors[i]), markersize=11)
#axes[0,1].text(row['HaggLQ'], 0.13, '%1.0f' % row['version'],
# color=str(colors[i]), ha='center')
xvalsELS, yvalsELS, probsELS = get_pdfbeta(row['PE_LS'], row['QE_LS'],
lspbins,
maxlim=row['ElimLS'])
if bars:
valmin, valmax = get_rangebeta(row['PE_LS'], row['QE_LS'],
prob=percrange,
maxlim=row['ElimLS'])
axes[1, 0].hlines(offset+0.1, valmin, valmax,
color=str(colors[i]), lw=2)
else:
axes[1, 0].plot(xvalsELS, yvalsELS/np.max(yvalsELS),
color=str(colors[i]), lw=widths[i])
axes[1, 0].plot(np.max((lspbins[0], row['ExpPopLS'])), offset,
marker=7, color=str(colors[i]), markersize=11)
#axes[1,0].text(row['ExpPopLS'], 0.13, '%1.0f' % row['version'],
# color=str(colors[i]), ha='center')
xvalsELQ, yvalsELQ, probsELQ = get_pdfbeta(row['PE_LQ'], row['QE_LQ'],
lqpbins,
maxlim=row['ElimLQ'])
if bars:
valmin, valmax = get_rangebeta(row['PE_LQ'], row['QE_LQ'],
prob=percrange,
maxlim=row['ElimLQ'])
axes[1, 1].hlines(offset+0.1, valmin, valmax,
color=str(colors[i]), lw=2)
else:
axes[1, 1].plot(xvalsELQ, yvalsELQ/np.max(yvalsELQ),
color=str(colors[i]), lw=widths[i])
axes[1, 1].plot(np.max((lqpbins[0], row['ExpPopLQ'])), offset,
marker=7, color=str(colors[i]), markersize=11)
#axes[1,1].text(row['ExpPopLQ'], 0.13, '%1.0f' % row['version'],
# color=str(colors[i]), ha='center')
i += 1
if not bars:
offset = 0.9
elif offset < 0.7:
offset = 0.7
if nvers == 1:
vals = [0.125, 0.375, 0.625, 0.875]
for i in range(4):
axes[0, 0].text(vals[i], 0.1, '%.2f' % probsHLS[i], ha='center',
va='center', transform=axes[0, 0].transAxes)
axes[0, 1].text(vals[i], 0.1, '%.2f' % probsHLQ[i], ha='center',
va='center', transform=axes[0, 1].transAxes)
axes[1, 0].text(vals[i], 0.1, '%.2f' % probsELS[i], ha='center',
va='center', transform=axes[1, 0].transAxes)
axes[1, 1].text(vals[i], 0.1, '%.2f' % probsELQ[i], ha='center',
va='center', transform=axes[1, 1].transAxes)
alertcolors = ['g', 'y', 'orange', 'r']
for i in range(4):
axes[0, 0].add_patch(patches.Rectangle((lshbins[i], -0.3),
lshbins[i+1] - lshbins[i], 0.3,
color=alertcolors[i], ec='k'))
axes[1, 0].add_patch(patches.Rectangle((lspbins[i], -0.3),
lspbins[i+1] - lspbins[i], 0.3,
color=alertcolors[i], ec='k'))
axes[0, 1].add_patch(patches.Rectangle((lqhbins[i], -0.3),
lqhbins[i+1] - lqhbins[i], 0.3,
color=alertcolors[i], ec='k'))
axes[1, 1].add_patch(patches.Rectangle((lqpbins[i], -0.3),
lqpbins[i+1] - lqpbins[i], 0.3,
color=alertcolors[i], ec='k'))
axes[0, 0].set_xlabel(r'Estimated Area Exposed to Hazard ($km^2$)',
fontsize=fontsize)
axes[1, 0].set_xlabel('Estimated Population Exposure', fontsize=fontsize)
axes[0, 1].set_xlabel(r'Estimated Area Exposed to Hazard ($km^2$)',
fontsize=fontsize)
axes[1, 1].set_xlabel('Estimated Population Exposure', fontsize=fontsize)
axes[0, 0].set_title('Landslides', fontsize=fontsize+2)
axes[0, 1].set_title('Liquefaction', fontsize=fontsize+2)
axes[0, 0].set_xlim([lshbins[0], lshbins[-1]])
axes[1, 0].set_xlim([lspbins[0], lspbins[-1]])
axes[0, 1].set_xlim([lqhbins[0], lqhbins[-1]])
axes[1, 1].set_xlim([lqpbins[0], lqpbins[-1]])
fig.canvas.draw()
for ax in axes:
for ax1 in ax:
ax1.set_xscale('log')
ax1.set_ylim([-0.3, offset+.2])
ax1.tick_params(labelsize=fontsize)
plt.setp(ax1.get_yticklabels(), visible=False)
ax1.set_yticks([])
ax1.axhline(0, color='k')
# labels = [item.get_text() for item in ax1.get_xticklabels()]
# labels[0] = '$\leq$%s' % labels[0]
# labels[-1] = '$\geq$%s' % labels[-1]
# ax1.set_xticklabels(labels)
ax1.text(-0.065, -0.13, '<', transform=ax1.transAxes)
ax1.text(0.95, -0.13, '>', transform=ax1.transAxes)
plt.subplots_adjust(hspace=0.5)
fig.suptitle('%4.f - M%1.1f - %s' % (row['time'].year,
row['mag'], row['location']),
fontsize=fontsize+2)
plt.show()
if filebasename is not None:
name, ext = os.path.splitext(filebasename)
fig.savefig('%s_uncertainty%s' % (name, ext),
bbox_inches='tight')
return fig
def alert_rectangles(ax, bins):
"""
Function used to color bin levels in background of axis
"""
colors = ['g', 'yellow', 'orange', 'r']
xlims = ax.get_xlim()
ylims = ax.get_ylim()
for i, col in enumerate(colors):
y = bins[i]
y2 = bins[i+1]
if col == 'g':
corners = [[xlims[0], ylims[0]], [xlims[0], y2], [xlims[1], y2],
[xlims[1], ylims[0]]]
elif col == 'r':
corners = [[xlims[0], y], [xlims[0], ylims[1]],
[xlims[1], ylims[1]], [xlims[1], y]]
else:
corners = [[xlims[0], y], [xlims[0], y2], [xlims[1], y2],
[xlims[1], y]]
# add rectangle
rect = patches.Polygon(corners, closed=True, facecolor=col,
transform=ax.transData, alpha=0.2)
ax.add_patch(rect)
def getFileType(filename):
"""
    Determine whether the input path is a directory or a grid file (GMT or
    ESRI format).
    Args:
        filename (str): Path to candidate filename.
    Returns:
        str: 'dir', 'gmt', 'esri', or 'unknown'.
"""
# TODO MOVE TO MAPIO.
if os.path.isdir(filename):
return 'dir'
ftype = GMTGrid.getFileType(filename)
if ftype != 'unknown':
return 'gmt'
# Skip over ESRI header files
if filename.endswith('.hdr'):
return 'unknown'
try:
GDALGrid.getFileGeoDict(filename)
return 'esri'
    except Exception:
pass
return 'unknown'
|
from PyQt4.QtCore import QString
from PyQt4.QtXml import QDomDocument
from random import choice
from urlparse import urljoin
from io import BytesIO
from zipfile import ZipFile
from time import strptime, mktime
from datetime import datetime
import os.path
from src.backends.backend import backend
from src.dataclasses import show, season, episode
class thetvdbbackend(backend):
''' A backend to the thetvdb.com API '''
def __init__(self, settings):
'''
@type settings: L{src.settings.settings}
'''
backend.__init__(self, settings)
# Load site mirrors for xml, banners, zip files
self.__mirrors = [[], [], []]
self.__loadmirrors()
def searchshow(self, name):
'''
@type name: str
@rtype: list
'''
data = self._request('http://www.thetvdb.com/api/GetSeries.php?seriesname=%s' % name)
xml = QDomDocument()
xml.setContent(data)
showsxml = xml.elementsByTagName('Series')
shows = []
for i in range(len(showsxml)):
newshow = show()
newshow.id = unicode(QString(showsxml.at(i).toElement().elementsByTagName('seriesid').at(0).childNodes().at(0).toText().data()))
newshow.name = unicode(QString(showsxml.at(i).toElement().elementsByTagName('SeriesName').at(0).childNodes().at(0).toText().data()))
newshow.description = unicode(QString(showsxml.at(i).toElement().elementsByTagName('Overview').at(0).childNodes().at(0).toText().data()))
newshow.image = unicode(QString(showsxml.at(i).toElement().elementsByTagName('banner').at(0).firstChild().toText().data()))
newshow.data = showsxml.at(i).toElement()
if len(newshow.image):
self._download(urljoin(urljoin(choice(self.__mirrors[1]), '/banners/'), newshow.image), newshow.image)
shows.append(newshow)
return shows
def getlocalshows(self):
'''
@rtype: list
'''
shows = []
if self._storage.exists('shows'):
showdirs = self._storage.listdir('shows')
for showdir in showdirs:
data = self._storage.getdata('shows/%s/en.xml' % showdir)
                if data is not None:
xml = QDomDocument()
xml.setContent(data)
showxml = xml.elementsByTagName('Series').at(0)
newshow = show()
newshow.id = unicode(QString(showxml.toElement().elementsByTagName('id').at(0).childNodes().at(0).toText().data()))
newshow.name = unicode(QString(showxml.toElement().elementsByTagName('SeriesName').at(0).childNodes().at(0).toText().data()))
newshow.description = unicode(QString(showxml.toElement().elementsByTagName('Overview').at(0).childNodes().at(0).toText().data()))
newshow.image = unicode(QString(showxml.toElement().elementsByTagName('banner').at(0).firstChild().toText().data()))
newshow.data = showxml.toElement()
newshow.actors = unicode(QString(showxml.toElement().elementsByTagName('Actors').at(0).childNodes().at(0).toText().data())).strip('|').split('|')
newshow.contentrating = unicode(QString(showxml.toElement().elementsByTagName('ContentRating').at(0).childNodes().at(0).toText().data()))
firstaired = unicode(QString(showxml.toElement().elementsByTagName('FirstAired').at(0).childNodes().at(0).toText().data()))
if firstaired != '':
newshow.firstaired = datetime.fromtimestamp(mktime(strptime(firstaired, '%Y-%m-%d')))
else:
newshow.firstaired = datetime.now()
newshow.genre = unicode(QString(showxml.toElement().elementsByTagName('Genre').at(0).childNodes().at(0).toText().data())).strip('|').split('|')
newshow.imdb = unicode(QString(showxml.toElement().elementsByTagName('IMDB_ID').at(0).childNodes().at(0).toText().data()))
newshow.network = unicode(QString(showxml.toElement().elementsByTagName('Network').at(0).childNodes().at(0).toText().data()))
rating = unicode(QString(
showxml.toElement().elementsByTagName('Rating').at(0).childNodes().at(0).toText().data()))
if rating != '':
newshow.rating = float(rating)
else:
newshow.rating = 0.0
newshow.runtime = int(unicode(QString(showxml.toElement().elementsByTagName('Runtime').at(0).childNodes().at(0).toText().data())))
newshow.status = unicode(QString(showxml.toElement().elementsByTagName('Status').at(0).childNodes().at(0).toText().data()))
shows.append(newshow)
return shows
def addshow(self, id):
'''
@type id: str
'''
if not self._storage.exists('shows/%s' % id):
self.updateshow(id)
def updateshow(self, id):
'''
@type id: str
'''
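        # Fetch the full series bundle (en.zip) from a random zip mirror,
        # cache every file it contains, then download the show banner and one
        # season-wide banner per season listed in banners.xml.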
zipdata = self._request(urljoin(choice(self.__mirrors[2]), '/api/%s/series/%s/all/en.zip' % (self.__apikey(), id)))
zipio = BytesIO()
zipio.write(bytes(zipdata))
zipfile = ZipFile(zipio)
for info in zipfile.infolist():
data = zipfile.open(info).read()
self._storage.savedata('shows/%s/%s' % (id, info.filename), data)
zipfile.close()
if self._storage.exists('shows/%s/en.xml' % id):
data = self._storage.getdata('shows/%s/en.xml' % id)
xml = QDomDocument()
xml.setContent(data)
showxml = xml.elementsByTagName('Series').at(0)
imageurl = unicode(QString(showxml.toElement().elementsByTagName('banner').at(0).childNodes().at(0).toText().data()))
if len(imageurl) > 0 and not self._storage.exists(imageurl):
self._download(urljoin(urljoin(choice(self.__mirrors[1]), '/banners/'), imageurl), imageurl)
if self._storage.exists('shows/%s/banners.xml' % id):
data = self._storage.getdata('shows/%s/banners.xml' % id)
xml = QDomDocument()
xml.setContent(data)
bannersxml = xml.elementsByTagName('Banner')
bannerslist = {}
for bannernum in range(bannersxml.count()):
language = unicode(QString(bannersxml.at(bannernum).toElement().elementsByTagName('Language').at(0).childNodes().at(0).toText().data()))
if language == 'en':
bannertype = unicode(QString(bannersxml.at(bannernum).toElement().elementsByTagName('BannerType').at(0).childNodes().at(0).toText().data()))
if bannertype == 'season':
bannertype2 = unicode(QString(bannersxml.at(bannernum).toElement().elementsByTagName('BannerType2').at(0).childNodes().at(0).toText().data()))
if bannertype2 == 'seasonwide':
bannerpath = unicode(QString(bannersxml.at(bannernum).toElement().elementsByTagName('BannerPath').at(0).childNodes().at(0).toText().data()))
                            rating = QString(bannersxml.at(bannernum).toElement().elementsByTagName('Rating').at(0).childNodes().at(0).toText().data()).toFloat()[0]
bannerseasoniditems = os.path.splitext(os.path.basename(bannerpath))[0].split('-')
bannerseasonid = '-'.join((bannerseasoniditems[0], bannerseasoniditems[1]))
if not bannerseasonid in bannerslist:
bannerslist[bannerseasonid] = []
bannerslist[bannerseasonid].append((bannerpath, rating))
for (bannerseasonid, banners) in bannerslist.iteritems():
sortedbanners = sorted(banners, key = lambda item: item[1])
if len(sortedbanners[0][0]) > 0 and not self._storage.exists('seasonbanners/%s%s' % (bannerseasonid, os.path.splitext(sortedbanners[0][0])[1])):
self._download(urljoin(urljoin(choice(self.__mirrors[1]), '/banners/'), sortedbanners[0][0]), 'seasonbanners/%s%s' % (bannerseasonid, os.path.splitext(sortedbanners[0][0])[1]))
def getlocalseasons(self, id):
'''
@type id: str
@rtype: list
'''
seasons = {}
if self._storage.exists('shows/%s/en.xml' % id):
data = self._storage.getdata('shows/%s/en.xml' % id)
xml = QDomDocument()
xml.setContent(data)
episodes = xml.elementsByTagName('Episode')
for episode in range(episodes.count()):
seasonid = unicode(QString(episodes.at(episode).toElement().elementsByTagName('seasonid').at(0).childNodes().at(0).toText().data()))
seasonnum = QString(episodes.at(episode).toElement().elementsByTagName('SeasonNumber').at(0).childNodes().at(0).toText().data()).toInt()
if seasonnum[1]:
if seasonnum[0] > 0:
if not seasonid in seasons:
newseason = season()
newseason.id = seasonid
newseason.number = seasonnum[0]
newseason.image = 'seasonbanners/%s-%d.jpg' % (id, newseason.number)
newseason.showid = id
seasons[seasonid] = newseason
return sorted(seasons.values(), key = lambda item: item.number)
def getlocalepisodes(self, showid, seasonid):
'''
@type showid: str
@type seasonid: str
@rtype: list
'''
episodes = []
if self._storage.exists('shows/%s/en.xml' % showid):
data = self._storage.getdata('shows/%s/en.xml' % showid)
xml = QDomDocument()
xml.setContent(data)
episodelist = xml.elementsByTagName('Episode')
for episodenum in range(episodelist.count()):
if seasonid == unicode(QString(episodelist.at(episodenum).toElement().elementsByTagName('seasonid').at(0).childNodes().at(0).toText().data())):
newepisode = episode()
number = QString(episodelist.at(episodenum).toElement().elementsByTagName('EpisodeNumber').at(0).childNodes().at(0).toText().data()).toInt()
if number[1]:
newepisode.number = number[0]
else:
newepisode.number = 0
if newepisode.number > 0:
newepisode.id = unicode(QString(episodelist.at(episodenum).toElement().elementsByTagName('id').at(0).childNodes().at(0).toText().data()))
newepisode.name = unicode(QString(episodelist.at(episodenum).toElement().elementsByTagName('EpisodeName').at(0).childNodes().at(0).toText().data()))
newepisode.description = unicode(QString(episodelist.at(episodenum).toElement().elementsByTagName('Overview').at(0).childNodes().at(0).toText().data()))
datestring = unicode(QString(episodelist.at(episodenum).toElement().elementsByTagName('FirstAired').at(0).childNodes().at(0).toText().data()))
if len(datestring) > 0:
newepisode.date = datetime.fromtimestamp(mktime(strptime(datestring, '%Y-%m-%d')))
newepisode.showid = showid
newepisode.seasonid = seasonid
newepisode.watched = self.getwatched(newepisode.showid, newepisode.seasonid, newepisode.id)
episodes.append(newepisode)
return sorted(episodes, key = lambda item: item.number)
def removeshow(self, id):
'''
@type id: str
'''
self._storage.removedir('shows/%s' % id)
def __apikey(self):
'''
@rtype: str
'''
return self._getsetting('apikey', str, 'C66331E1E6D28F85')
def __loadmirrors(self):
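        # mirrors.xml describes each mirror with a typemask bitfield
        # (bit 1 = xml files, bit 2 = banner files, bit 4 = zip files);
        # matching mirrors are collected into self.__mirrors[0..2].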
data = self._request('http://www.thetvdb.com/api/%s/mirrors.xml' % self.__apikey())
xml = QDomDocument()
xml.setContent(data)
mirrors = xml.elementsByTagName('Mirror')
for i in range(len(mirrors)):
typemask = QString(mirrors.at(i).toElement().elementsByTagName('typemask').at(0).childNodes().at(0).toText().data()).toInt()
mirrorpath = unicode(QString(mirrors.at(i).toElement().elementsByTagName('mirrorpath').at(0).childNodes().at(0).toText().data()))
if typemask[1]:
if typemask[0] & 1:
self.__mirrors[0].append(mirrorpath)
if typemask[0] & 2:
self.__mirrors[1].append(mirrorpath)
if typemask[0] & 4:
self.__mirrors[2].append(mirrorpath)
|
import json
from django.conf import settings
from django.core.mail import send_mail, BadHeaderError
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_http_methods
from contacts.forms import ContactForm
from helpers.models import Helpers
from offer.models import Offer
from offer.models import OfferCategory
from tours.forms import BookNow
from tours.models import Category, Tour, About
def get_lang(request):
lang = request.LANGUAGE_CODE
return lang
def get_company():
return Helpers.objects.get(id=1).company_name
def home(request):
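    # A 'q' query parameter redirects straight to the search view; otherwise
    # build the localized landing-page context from the Helpers record (id=1).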
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
lang = request.LANGUAGE_CODE
booking_form = BookNow()
breadcrumbs = [
{'url': '/', 'name': _('Home'), 'active': True},
]
header = {
'pt': Helpers.objects.get(id=1).start_page_header_pt,
'en': Helpers.objects.get(id=1).start_page_header_gb,
'de': Helpers.objects.get(id=1).start_page_header_de
}
tour_header = {
'pt': Helpers.objects.get(id=1).tour_header_name_PT,
'en': Helpers.objects.get(id=1).tour_header_name_EN,
'de': Helpers.objects.get(id=1).tour_header_name_DE
}
offer_header = {
'pt': Helpers.objects.get(id=1).offer_header_name_PT,
'en': Helpers.objects.get(id=1).offer_header_name_EN,
'de': Helpers.objects.get(id=1).offer_header_name_DE
}
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
context = {
'booking_form': booking_form,
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'audio': Helpers.objects.get(id=1).audio,
'company': get_company(),
'header': header[lang],
'value': _('Send'),
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'section': {
'tour_header': tour_header[lang],
'offer_header': offer_header[lang]
},
'img1': Helpers.objects.get(id=1).img,
'img2': Helpers.objects.get(id=1).img2,
'img3': Helpers.objects.get(id=1).img3,
'img4': Helpers.objects.get(id=1).img4,
'img5': Helpers.objects.get(id=1).img5,
'lang': lang,
'offer_list': Offer.objects.all(),
'tour_list': Tour.objects.all(),
'breadcrumbs': breadcrumbs
}
return render(request, 'partials/home.html', context)
def about(request):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
breadcrumbs = [
{'url': '/', 'name': _('Home')},
{'url': '#', 'name': _('About'), 'active': True}
]
lang = request.LANGUAGE_CODE
context = {
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'company': get_company(),
'title': _('About'),
'breadcrumbs': breadcrumbs,
'about_list': About.objects.all(),
}
return render(request, 'partials/about.html', context)
def login_or_register(request):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
breadcrumbs = [{'url': '/', 'name': _('Home'), 'active': True}]
return render(request, 'partials/login_or_register.html', {'breadcrumbs': breadcrumbs})
def search(request):
lang = request.LANGUAGE_CODE
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
offer_queryset = Offer.objects.all()
tour_queryset = Tour.objects.all()
query = request.GET.get('q')
offer_object_list = []
tour_object_list = []
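    # Offer and Tour store translated fields per language (title_PT/_EN/_DE
    # and matching descriptions), so the search only queries the fields of
    # the active language.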
if 'pt' in lang:
offer_object_list = offer_queryset.filter(
Q(title_PT__icontains=query) |
Q(description_PT__icontains=query)
).distinct()
    elif 'en' in lang:
        offer_object_list = offer_queryset.filter(
            Q(title_EN__icontains=query) |
            Q(description_EN__icontains=query)
        ).distinct()
    elif 'de' in lang:
        offer_object_list = offer_queryset.filter(
            Q(title_DE__icontains=query) |
            Q(description_DE__icontains=query))
if 'pt' in lang:
tour_object_list = tour_queryset.filter(
Q(title_PT__icontains=query) |
Q(description_PT__icontains=query)
).distinct()
    elif 'en' in lang:
        tour_object_list = tour_queryset.filter(
            Q(title_EN__icontains=query) |
            Q(description_EN__icontains=query)
        ).distinct()
    elif 'de' in lang:
        tour_object_list = tour_queryset.filter(
            Q(title_DE__icontains=query) |
            Q(description_DE__icontains=query))
context = {
'offer_object_list': offer_object_list,
'tour_object_list': tour_object_list,
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'title': 'Contact me',
'company': get_company(),
'breadcrumbs': [
{'url': '/', 'name': _('Home')},
]}
return render(request, 'partials/search.html', context)
@require_http_methods(['POST'])
def welcome(request):
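    # Expects a JSON request body rather than form-encoded data; it is decoded
    # manually, validated through ContactForm, and invalid submissions receive
    # a 422 response carrying the form errors.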
body_unicode = request.body.decode('utf-8')
body_data = json.loads(body_unicode)
form = ContactForm({
"name": body_data["name"],
"email": body_data["email"],
"message": body_data["message"],
"additional_information": body_data["additionalInformation"],
})
if form.is_valid():
return HttpResponse(request.body)
else:
response = HttpResponse(form.errors)
response.status_code = 422
response.reason_phrase = 'Validation failed'
return response
|
import sys
import numpy as np
import warnings
from ..pakbase import Package
from ..utils import Util2d, MfList, Transient2d
# Note: Order matters as first 6 need logical flag on line 1 of SSM file
SsmLabels = ['WEL', 'DRN', 'RCH', 'EVT', 'RIV', 'GHB', 'BAS6', 'CHD', 'PBC']
class SsmPackage(object):
def __init__(self, label='', instance=None, needTFstr=False):
self.label = label
self.instance = instance
self.needTFstr = needTFstr
self.TFstr = ' F'
if self.instance is not None:
self.TFstr = ' T'
class Mt3dSsm(Package):
"""
MT3DMS Source and Sink Mixing Package Class.
Parameters
----------
model : model object
The model object (of type :class:`hataripy.mt3d.mt.Mt3dms`) to which
this package will be added.
crch : Transient2d, scalar, array of floats, or dictionary
CRCH is the concentration of recharge for species 1.
If the recharge flux is positive, it acts as a source whose
concentration can be specified as desired. If the recharge flux is
negative, it acts as a sink (discharge) whose concentration is always
set equal to the concentration of groundwater at the cell where
discharge occurs. Note that the location and flow rate of
recharge/discharge are obtained from the flow model directly through
the unformatted flow-transport link file. crch can be specified as
an array, if the array is constant for the entire simulation. If
crch changes by stress period, then the user must provide a
dictionary, where the key is the stress period number (zero based) and
the value is the recharge array. The recharge concentration
can be specified for additional species by passing additional
arguments to the Mt3dSsm constructor. For example, to specify the
recharge concentration for species two one could use
        crch2={0: 0., 1: 10*np.ones((nrow, ncol), dtype=np.float)} as an
        additional keyword argument that is passed to Mt3dSsm when making
        the ssm object.
cevt : Transient2d, scalar, array of floats, or dictionary
is the concentration of evapotranspiration flux for species 1.
Evapotranspiration is the only type of sink whose
concentration may be specified externally. Note that the
concentration of a sink cannot be greater than that of the aquifer at
the sink cell. Thus, if the sink concentration is specified greater
than that of the aquifer, it is automatically set equal to the
concentration of the aquifer. Also note that the location and flow
rate of evapotranspiration are obtained from the flow model directly
through the unformatted flow-transport link file. For multi-species
simulations, see crch for a description of how to specify
additional concentrations arrays for each species.
stress_period_data : dictionary
        Keys in the dictionary are zero-based stress period numbers;
values in the dictionary are recarrays of SSM boundaries. The
dtype for the recarray can be obtained using ssm.dtype (after the
ssm package has been created). The default dtype for the recarray is
np.dtype([('k', np.int), ("i", np.int), ("j", np.int),
("css", np.float32), ("itype", np.int),
((cssms(n), np.float), n=1, ncomp)])
        If there is more than one component species, then additional entries
        will be added to the dtype as indicated by cssms(n).
Note that if the number of dictionary entries is less than the number
of stress periods, then the last recarray of boundaries will apply
until the end of the simulation. Full details of all options to
specify stress_period_data can be found in the
hataripy3_multi-component_SSM ipython notebook in the Notebook
subdirectory of the examples directory.
css is the specified source concentration or mass-loading rate,
depending on the value of ITYPE, in a single-species simulation,
(For a multispecies simulation, CSS is not used, but a dummy value
still needs to be entered here.)
Note that for most types of sources, CSS is interpreted as the
source concentration with the unit of mass per unit volume (ML-3),
which, when multiplied by its corresponding flow rate (L3T-1) from
the flow model, yields the mass-loading rate (MT-1) of the source.
For a special type of sources (ITYPE = 15), CSS is taken directly as
the mass-loading rate (MT-1) of the source so that no flow rate is
required from the flow model.
Furthermore, if the source is specified as a constant-concentration
cell (itype = -1), the specified value of CSS is assigned directly as
the concentration of the designated cell. If the designated cell is
also associated with a sink/source term in the flow model, the flow
rate is not used.
itype is an integer indicating the type of the point source. An itype
dictionary can be retrieved from the ssm object as
itype = mt3d.Mt3dSsm.itype_dict()
(CSSMS(n), n=1, NCOMP) defines the concentrations of a point source
for multispecies simulation with NCOMP>1. In a multispecies
simulation, it is necessary to define the concentrations of all
species associated with a point source. As an example, if a chemical
of a certain species is injected into a multispecies system, the
concentration of that species is assigned a value greater than zero
while the concentrations of all other species are assigned zero.
CSSMS(n) can be entered in free format, separated by a comma or space
between values.
Several important notes on assigning concentration for the
constant-concentration condition (ITYPE = -1) are listed below:
The constant-concentration condition defined in this input file takes
precedence to that defined in the Basic Transport Package input file.
In a multiple stress period simulation, a constant-concentration
cell, once defined, will remain a constant-concentration cell for the
duration of the simulation, but its concentration value can be
specified to vary in different stress periods.
In a multispecies simulation, if it is only necessary to define
different constant-concentration conditions for selected species at
the same cell location, specify the desired concentrations for those
species, and assign a negative value for all other species. The
negative value is a flag used by MT3DMS to skip assigning the
constant-concentration condition for the designated species.
dtype : np.dtype
dtype to use for the recarray of boundaries. If left as None (the
default) then the dtype will be automatically constructed.
extension : string
Filename extension (default is 'ssm')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package. If filenames=None the package name
will be created using the model name and package extension. If a
single string is passed the package will be set to the string.
Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import hataripy
>>> m = hataripy.mt3d.Mt3dms()
>>> itype = mt3d.Mt3dSsm.itype_dict()
>>> ssm_data = {}
>>> ssm_data[0] = [(4, 4, 4, 1.0, itype['GHB'], 1.0, 100.0)]
>>> ssm_data[5] = [(4, 4, 4, 0.5, itype['GHB'], 0.5, 200.0)]
>>> ssm = hataripy.mt3d.Mt3dSsm(m, stress_period_data=ssm_data)
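A minimal sketch of the constant-concentration notes above, assuming a
two-species model m (ncomp=2); the cell indices and concentration values
are arbitrary. Species 1 is held at 25.0 in cell (0, 9, 9), while the
negative flag skips the constant-concentration assignment for species 2.
>>> cc_data = {}
>>> cc_data[0] = [(0, 9, 9, 25.0, itype['CC'], 25.0, -1.0)]
>>> ssm_cc = hataripy.mt3d.Mt3dSsm(m, stress_period_data=cc_data)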
"""
def __init__(self, model, crch=None, cevt=None, mxss=None,
stress_period_data=None, dtype=None,
extension='ssm', unitnumber=None, filenames=None,
**kwargs):
if unitnumber is None:
unitnumber = Mt3dSsm.defaultunit()
elif unitnumber == 0:
unitnumber = Mt3dSsm.reservedunit()
# set filenames
if filenames is None:
filenames = [None]
elif isinstance(filenames, str):
filenames = [filenames]
# Fill namefile items
name = [Mt3dSsm.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension=extension, name=name,
unit_number=units, extra=extra, filenames=fname)
deprecated_kwargs = ['criv', 'cghb', 'cibd', 'cchd', 'cpbc', 'cwel']
for key in kwargs:
if (key in deprecated_kwargs):
warnings.warn("Deprecation Warning: Keyword argument '" + key +
"' no longer supported. Use " +
"'stress_period_data' instead.")
# Set dimensions
mf = self.parent.mf
nrow = model.nrow
ncol = model.ncol
nlay = model.nlay
ncomp = model.ncomp
# Create a list of SsmPackage (class defined above)
self.__SsmPackages = []
if mf is not None:
for i, label in enumerate(SsmLabels):
mfpack = mf.get_package(label)
ssmpack = SsmPackage(label, mfpack, (i < 6))
self.__SsmPackages.append(
ssmpack) # First 6 need T/F flag in file line 1
if dtype is not None:
self.dtype = dtype
else:
self.dtype = self.get_default_dtype(ncomp)
if stress_period_data is None:
self.stress_period_data = None
else:
self.stress_period_data = MfList(self, model=model,
data=stress_period_data,
list_free_format=False)
if mxss is None and mf is None:
warnings.warn('SSM Package: mxss is None and modflowmodel is ' +
'None. Cannot calculate max number of sources ' +
'and sinks. Estimating from stress_period_data. ')
if mxss is None:
# Need to calculate max number of sources and sinks
self.mxss = 0
if self.stress_period_data is not None:
self.mxss += np.sum(
self.stress_period_data.data[0].itype == -1)
self.mxss += np.sum(
self.stress_period_data.data[0].itype == -15)
if isinstance(self.parent.btn.icbund, np.ndarray):
self.mxss += (self.parent.btn.icbund < 0).sum()
for p in self.__SsmPackages:
if ((p.label == 'BAS6') and (p.instance != None)):
self.mxss += (p.instance.ibound.array < 0).sum()
elif p.instance != None:
self.mxss += p.instance.ncells()
else:
self.mxss = mxss
# Note: list is used for multi-species, NOT for stress periods!
self.crch = None
try:
if crch is None and model.mf.rch is not None:
print("found 'rch' in modflow model, resetting crch to 0.0")
crch = 0.0
except:
if model.verbose:
print(' explicit crch in file')
if crch is not None:
self.crch = []
t2d = Transient2d(model, (nrow, ncol), np.float32,
crch, name='crch1',
locat=self.unit_number[0],
array_free_format=False)
self.crch.append(t2d)
if ncomp > 1:
for icomp in range(2, ncomp + 1):
val = 0.0
name = "crch" + str(icomp)
if name in list(kwargs.keys()):
val = kwargs.pop(name)
else:
print("SSM: setting crch for component " + \
str(icomp) + " to zero. kwarg name " + \
name)
t2d = Transient2d(model, (nrow, ncol), np.float32,
val, name=name,
locat=self.unit_number[0],
array_free_format=False)
self.crch.append(t2d)
# else:
# try:
# if model.mf.rch is not None:
# print("found 'rch' in modflow model, resetting crch to 0.0")
# self.crch = [Transient2d(model, (nrow, ncol), np.float32,
# 0, name='crch1',
# locat=self.unit_number[0],
# array_free_format=False)]
#
# else:
# self.crch = None
# except:
# self.crch = None
self.cevt = None
try:
if cevt is None and (
model.mf.evt is not None or model.mf.ets is not None):
print(
"found 'ets'/'evt' in modflow model, resetting cevt to 0.0")
cevt = 0.0
except:
if model.verbose:
print(' explicit cevt in file')
if cevt is not None:
self.cevt = []
t2d = Transient2d(model, (nrow, ncol), np.float32,
cevt, name='cevt1',
locat=self.unit_number[0],
array_free_format=False)
self.cevt.append(t2d)
if ncomp > 1:
for icomp in range(2, ncomp + 1):
val = 0.0
name = "cevt" + str(icomp)
if name in list(kwargs.keys()):
val = kwargs[name]
kwargs.pop(name)
else:
print("SSM: setting cevt for component " + \
str(icomp) + " to zero, kwarg name " + \
name)
t2d = Transient2d(model, (nrow, ncol), np.float32,
val, name=name,
locat=self.unit_number[0],
array_free_format=False)
self.cevt.append(t2d)
# else:
# try:
# if model.mf.evt is not None or model.mf.ets is not None:
# print("found 'ets'/'evt' in modflow model, resetting cevt to 0.0")
# self.cevt = [Transient2d(model, (nrow, ncol), np.float32,
# 0, name='cevt1',
# locat=self.unit_number[0],
# array_free_format=False)]
#
# else:
# self.cevt = None
# except:
# self.cevt = None
if len(list(kwargs.keys())) > 0:
raise Exception("SSM error: unrecognized kwargs: " +
' '.join(list(kwargs.keys())))
# Add self to parent and return
self.parent.add_package(self)
return
def from_package(self, package, ncomp_aux_names):
"""
read the point source and sink info from a package
ncomp_aux_names (list): the aux variable names in the package
that are the component concentrations
"""
raise NotImplementedError()
@staticmethod
def itype_dict():
itype = {}
itype["CHD"] = 1
itype["BAS6"] = 1
itype["PBC"] = 1
itype["WEL"] = 2
itype["DRN"] = 3
itype["RIV"] = 4
itype["GHB"] = 5
itype["MAS"] = 15
itype["CC"] = -1
return itype
@staticmethod
def get_default_dtype(ncomp=1):
"""
Construct a dtype for the recarray containing the list of sources
and sinks
"""
type_list = [("k", np.int), ("i", np.int), ("j", np.int),
("css", np.float32), ("itype", np.int)]
if ncomp > 1:
for comp in range(1, ncomp + 1):
comp_name = "cssm({0:02d})".format(comp)
type_list.append((comp_name, np.float32))
dtype = np.dtype(type_list)
return dtype
def write_file(self):
"""
Write the package file
Returns
-------
None
"""
# Open file for writing
f_ssm = open(self.fn_path, 'w')
for p in self.__SsmPackages:
if p.needTFstr:
f_ssm.write(p.TFstr)
f_ssm.write(' F F F F F F F F F F\n')
f_ssm.write('{:10d}\n'.format(self.mxss))
# Loop through each stress period and write ssm information
nper = self.parent.nper
for kper in range(nper):
if f_ssm.closed == True:
f_ssm = open(f_ssm.name, 'a')
# Distributed sources and sinks (Recharge and Evapotranspiration)
if self.crch is not None:
# If any species need to be written, then all need to be
# written
incrch = -1
for t2d in self.crch:
incrchicomp, file_entry = t2d.get_kper_entry(kper)
incrch = max(incrch, incrchicomp)
if incrch == 1:
break
f_ssm.write('{:10d}\n'.format(incrch))
if incrch == 1:
for t2d in self.crch:
u2d = t2d[kper]
file_entry = u2d.get_file_entry()
f_ssm.write(file_entry)
if self.cevt is not None:
# If any species need to be written, then all need to be
# written
incevt = -1
for t2d in self.cevt:
incevticomp, file_entry = t2d.get_kper_entry(kper)
incevt = max(incevt, incevticomp)
if incevt == 1:
break
f_ssm.write('{:10d}\n'.format(incevt))
if incevt == 1:
for t2d in self.cevt:
u2d = t2d[kper]
file_entry = u2d.get_file_entry()
f_ssm.write(file_entry)
# List of sources
if self.stress_period_data is not None:
self.stress_period_data.write_transient(f_ssm, single_per=kper)
else:
f_ssm.write('{}\n'.format(0))
f_ssm.close()
return
@staticmethod
def load(f, model, nlay=None, nrow=None, ncol=None, nper=None,
ncomp=None, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`hataripy.mt3d.mt.Mt3dms`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`hataripy.utils.mfreadnam.parsenamefile`.
Returns
-------
ssm : Mt3dSsm object
Mt3dSsm object.
Examples
--------
>>> import hataripy
>>> mt = hataripy.mt3d.Mt3dms()
>>> ssm = hataripy.mt3d.Mt3dSsm.load('test.ssm', mt)
"""
if model.verbose:
sys.stdout.write('loading ssm package file...\n')
# Open file, if necessary
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# Set modflow model and dimensions if necessary
mf = model.mf
if nlay is None:
nlay = model.nlay
if nrow is None:
nrow = model.nrow
if ncol is None:
ncol = model.ncol
if nper is None:
nper = model.nper
if ncomp is None:
ncomp = model.ncomp
# dtype
dtype = Mt3dSsm.get_default_dtype(ncomp)
# Dataset 0 -- comment line
while True:
line = f.readline()
if line[0] != '#':
break
# Item D1: Dummy input line - line already read above
if model.verbose:
print(
' loading FWEL, FDRN, FRCH, FEVT, FRIV, FGHB, (FNEW(n), n=1,4)...')
fwel = line[0:2]
fdrn = line[2:4]
frch = line[4:6]
fevt = line[6:8]
friv = line[8:10]
fghb = line[10:12]
if len(line) >= 14:
fnew1 = line[12:14]
else:
fnew1 = 'F'
if len(line) >= 16:
fnew2 = line[14:16]
else:
fnew2 = 'F'
if len(line) >= 18:
fnew3 = line[16:18]
else:
fnew3 = 'F'
if len(line) >= 20:
fnew4 = line[18:20]
else:
fnew4 = 'F'
if model.verbose:
print(' FWEL {}'.format(fwel))
print(' FDRN {}'.format(fdrn))
print(' FRCH {}'.format(frch))
print(' FEVT {}'.format(fevt))
print(' FRIV {}'.format(friv))
print(' FGHB {}'.format(fghb))
print(' FNEW1 {}'.format(fnew1))
print(' FNEW2 {}'.format(fnew2))
print(' FNEW3 {}'.format(fnew3))
print(' FNEW4 {}'.format(fnew4))
# Override the logical settings at top of ssm file using the
# modflowmodel, if it is attached to parent
if mf is not None:
rchpack = mf.get_package('RCH')
if rchpack is not None:
frch = 't'
evtpack = mf.get_package('EVT')
if evtpack is not None:
fevt = 't'
# Item D2: MXSS, ISSGOUT
mxss = None
if model.verbose:
print(' loading MXSS, ISSGOUT...')
line = f.readline()
mxss = int(line[0:10])
try:
issgout = int(line[10:20])
except:
issgout = 0
if model.verbose:
print(' MXSS {}'.format(mxss))
print(' ISSGOUT {}'.format(issgout))
# kwargs needed to construct crch2, crch3, etc. for multispecies
kwargs = {}
crch = None
if 't' in frch.lower():
t2d = Transient2d(model, (nrow, ncol), np.float32,
0.0, name='crch', locat=0,
array_free_format=False)
crch = {0: t2d}
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "crch" + str(icomp)
t2d = Transient2d(model, (nrow, ncol), np.float32,
0.0, name=name, locat=0,
array_free_format=False)
kwargs[name] = {0: t2d}
cevt = None
if 't' in fevt.lower():
t2d = Transient2d(model, (nrow, ncol), np.float32,
0.0, name='cevt', locat=0,
array_free_format=False)
cevt = {0: t2d}
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "cevt" + str(icomp)
t2d = Transient2d(model, (nrow, ncol), np.float32,
0.0, name=name, locat=0,
array_free_format=False)
kwargs[name] = {0: t2d}
stress_period_data = {}
for iper in range(nper):
if model.verbose:
print(" loading ssm for kper {0:5d}".format(iper + 1))
# Item D3: INCRCH
incrch = -1
if 't' in frch.lower():
if model.verbose:
print(' loading INCRCH...')
line = f.readline()
incrch = int(line[0:10])
# Item D4: CRCH
if incrch >= 0:
if model.verbose:
print(' loading CRCH...')
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'crch',
ext_unit_dict, array_format="mt3d")
crch[iper] = t
# Load each multispecies array
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "crch" + str(icomp)
if model.verbose:
print(' loading {}...'.format(name))
t = Util2d.load(f, model, (nrow, ncol),
np.float32, name, ext_unit_dict,
array_format="mt3d")
crchicomp = kwargs[name]
crchicomp[iper] = t
# Item D5: INCEVT
incevt = -1
if 't' in fevt.lower():
if model.verbose:
print(' loading INCEVT...')
line = f.readline()
incevt = int(line[0:10])
# Item D6: CEVT
if incevt >= 0:
if model.verbose:
print(' loading CEVT...')
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'cevt',
ext_unit_dict, array_format="mt3d")
cevt[iper] = t
# Load each multispecies array
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "cevt" + str(icomp)
if model.verbose:
print(' loading {}...'.format(name))
t = Util2d.load(f, model, (nrow, ncol),
np.float32, name, ext_unit_dict,
array_format="mt3d")
cevticomp = kwargs[name]
cevticomp[iper] = t
# Item D7: NSS
if model.verbose:
print(' loading NSS...')
line = f.readline()
nss = int(line[0:10])
# Item D8: KSS, ISS, JSS, CSS, ITYPE, (CSSMS(n),n=1,NCOMP)
if model.verbose:
print(
' loading KSS, ISS, JSS, CSS, ITYPE, (CSSMS(n),n=1,NCOMP)...')
current = 0
if nss > 0:
current = np.empty((nss), dtype=dtype)
for ibnd in range(nss):
line = f.readline()
t = []
for ivar in range(5):
istart = ivar * 10
istop = istart + 10
t.append(line[istart:istop])
ncssms = len(current.dtype.names) - 5
if ncssms > 0:
tt = line[istop:].strip().split()
for ivar in range(ncssms):
t.append(tt[ivar])
current[ibnd] = tuple(t[:len(current.dtype.names)])
# convert indices to zero-based
current['k'] -= 1
current['i'] -= 1
current['j'] -= 1
current = current.view(np.recarray)
stress_period_data[iper] = current
# set package unit number
unitnumber = None
filenames = [None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = \
model.get_ext_dict_attr(ext_unit_dict,
filetype=Mt3dSsm.ftype())
# Construct and return ssm package
ssm = Mt3dSsm(model, crch=crch, cevt=cevt, mxss=mxss,
stress_period_data=stress_period_data,
unitnumber=unitnumber, filenames=filenames, **kwargs)
return ssm
@staticmethod
def ftype():
return 'SSM'
@staticmethod
def defaultunit():
return 34
@staticmethod
def reservedunit():
return 4
|
from django.shortcuts import render
from django.contrib.admin.views.decorators import staff_member_required
from database.extract import Extract
from database.transform import Transform
from database.load import Load
from database.utils import DBManage
@staff_member_required(login_url='/users/login/')
def etl(request):
return render(request, 'database/etl.html')
@staff_member_required(login_url='/users/login/')
def etl_extract(request):
extract = Extract()
message = extract.extract()
return render(request, 'database/etl.html', {'message_extract': message})
@staff_member_required(login_url='/users/login/')
def etl_transform(request):
transform = Transform()
message = transform.transform_basic()
return render(request, 'database/etl.html', {'message_transform': message})
@staff_member_required(login_url='/users/login/')
def etl_load(request):
loading = Load()
message = loading.load_data()
return render(request, 'database/etl.html', {'message_load': message})
@staff_member_required(login_url='/users/login/')
def etl_manage_nutriscore(request):
managing = DBManage()
message = managing.load_nutriscore()
return render(request, 'database/etl.html', {'message_manage_nutriscore': message})
@staff_member_required(login_url='/users/login/')
def etl_manage_delete(request):
managing = DBManage()
message = managing.delete_tables()
return render(request, 'database/etl.html', {'message_manage_delete': message})
|
import base64
import os
import warnings
from videotoframes import convert
from tests.utilities import get_testfiles_path
def test_convert():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64)
assert frames[0] != frames[-1]
assert len(frames) == 166
def test_convert_max_frames():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64, max_frames=10)
assert frames[0] != frames[-1]
assert len(frames) == 10
def test_main_max_frames_even():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64, max_frames=10, even=True)
assert frames[0] != frames[-1]
assert len(frames) == 10
def test_convert_max_frames_even_2_frames():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64, max_frames=2, even=True)
assert frames[0] != frames[-1]
assert len(frames) == 2
def test_convert_frame_rate():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64, frame_rate=2)
assert frames[0] != frames[-1]
assert len(frames) == 12
def test_convert_frame_rate_small():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64, frame_rate=1)
assert frames[0] != frames[-1]
assert len(frames) == 6
def test_convert_frame_rate_divisible():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64, frame_rate=15)
assert frames[0] != frames[-1]
assert len(frames) == 83
def test_convert_frame_rate_full():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64, frame_rate=30)
assert frames[0] != frames[-1]
assert len(frames) == 166
def test_convert_frame_rate_higher_than_full():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64, frame_rate=40)
assert frames[0] != frames[-1]
assert len(frames) == 166
def test_convert_frame_rate_max_frames():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64, max_frames=8, frame_rate=2)
assert len(frames) == 8
assert frames[0] != frames[-1]
def test_convert_frame_rate_max_frames_higher_than_frame_count():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64, max_frames=13, frame_rate=2)
assert len(frames) == 12
assert frames[0] != frames[-1]
def test_convert_deprecate_list_response():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
convert(video_base_64, max_frames=13, frame_rate=2, return_dict=False)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "Returning a list instead of a list of dictionaries." in str(w[-1].message)
def test_convert_dictionary_return():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64, max_frames=5, frame_rate=1, return_dict=True)
assert len(frames) == 5
for frame in frames:
assert isinstance(frame, dict)
assert set(frame.keys()) == {'base64image', 'frameNumber'}
assert frames[0] != frames[-1]
def test_convert_video_timestamp():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64, max_frames=6, video_timestamp='2019-02-10 20:25:00')
assert len(frames) == 6
for frame in frames:
assert isinstance(frame, dict)
assert set(frame.keys()) == {'base64image', 'frameNumber', 'timestamp'}
assert ['2019-02-10 20:25:00',
'2019-02-10 20:25:00',
'2019-02-10 20:25:00',
'2019-02-10 20:25:00',
'2019-02-10 20:25:00',
'2019-02-10 20:25:00'] == [frame['timestamp'] for frame in frames]
def test_convert_video_timestamp_frame_rate():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64, max_frames=6, frame_rate=4, video_timestamp='2019-02-10 20:25:00')
assert len(frames) == 6
for frame in frames:
assert isinstance(frame, dict)
assert set(frame.keys()) == {'base64image', 'frameNumber', 'timestamp'}
assert ['2019-02-10 20:25:00',
'2019-02-10 20:25:00',
'2019-02-10 20:25:00',
'2019-02-10 20:25:00',
'2019-02-10 20:25:01',
'2019-02-10 20:25:01'] == [frame['timestamp'] for frame in frames]
def test_convert_video_timestamp_even():
with open(os.path.join(get_testfiles_path(), 'small.mp4'), 'rb') as file:
video_base_64 = base64.b64encode(file.read()).decode()
frames = convert(video_base_64, max_frames=6, even=True, video_timestamp='2019-02-10 20:25:00')
assert len(frames) == 6
for frame in frames:
assert isinstance(frame, dict)
assert set(frame.keys()) == {'base64image', 'frameNumber', 'timestamp'}
assert ['2019-02-10 20:25:00',
'2019-02-10 20:25:01',
'2019-02-10 20:25:02',
'2019-02-10 20:25:03',
'2019-02-10 20:25:04',
'2019-02-10 20:25:05'] == [frame['timestamp'] for frame in frames]
|
import cv2
import numpy as np
np.set_printoptions(threshold=np.inf)
def increase_brightness(img, value=30):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
#print(v.head(5))
print(sum(v))
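# Raise the V (brightness) channel by `value`: pixels above the limit saturate at 255, the rest are incremented, avoiding uint8 overflow.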
lim = 255 - value
v[v > lim] = 255
v[v <= lim] += value
final_hsv = cv2.merge((h, s, v))
img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
return img
img = cv2.imread('2.jpeg') # load image (OpenCV reads it in BGR channel order)
img_final = increase_brightness(img, value=20)
#cv2.imshow('Original image',img)
#cv2.imshow('Final image', img_final)
cv2.imwrite("2_processed.jpg", img_final)
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Quarantine (isolate) an endpoint"
class Input:
AGENT = "agent"
QUARANTINE_STATE = "quarantine_state"
WHITELIST = "whitelist"
class Output:
RESULT_CODE = "result_code"
RESULT_CONTENT = "result_content"
RESULT_DESCRIPTION = "result_description"
class QuarantineInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"agent": {
"type": "string",
"title": "Agent",
"description": "Agent ID, hostname, MAC address, or IP address of the agent to perform the action on",
"order": 1
},
"quarantine_state": {
"type": "boolean",
"title": "Quarantine State",
"description": "True to quarantine host, false to unquarantine host",
"default": true,
"order": 2
},
"whitelist": {
"type": "array",
"title": "Whitelist",
"description": "This list contains a set of devices that should not be blocked. This can include IPs, hostnames, UUIDs and agent IDs",
"items": {
"type": "string"
},
"order": 3
}
},
"required": [
"agent"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class QuarantineOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"result_code": {
"type": "integer",
"title": "Result Code",
"description": "The Apex Central Automation API result code",
"order": 1
},
"result_content": {
"type": "array",
"title": "Result Content",
"description": "The Apex Central Automation API result content",
"items": {
"$ref": "#/definitions/result_content"
},
"order": 2
},
"result_description": {
"type": "string",
"title": "Result Description",
"description": "The Apex Central Automation API result description",
"order": 3
}
},
"definitions": {
"result_content": {
"type": "object",
"title": "result_content",
"properties": {
"capabilities": {
"type": "array",
"title": "Capabilities",
"description": "Result capabilities",
"items": {
"type": "string"
},
"order": 1
},
"entity_id": {
"type": "string",
"title": "Entity ID",
"description": "Result entity ID",
"order": 2
},
"folder_path": {
"type": "string",
"title": "Folder Path",
"description": "Result folder path",
"order": 3
},
"host_name": {
"type": "string",
"title": "Host Name",
"description": "Result host name",
"order": 4
},
"ip_address_list": {
"type": "string",
"title": "IP Address List",
"description": "Result IP address list",
"order": 5
},
"isolation_status": {
"type": "string",
"title": "Isolation Status",
"description": "Result isolation status",
"order": 6
},
"mac_address_list": {
"type": "string",
"title": "MAC Address List",
"description": "Result MAC address list",
"order": 7
},
"managing_server_id": {
"type": "string",
"title": "Managing Server ID",
"description": "Result managing server ID",
"order": 8
},
"product": {
"type": "string",
"title": "Product",
"description": "Result product",
"order": 9
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
import struct
from twisted.internet.protocol import Factory, ServerFactory, Protocol, DatagramProtocol
from twisted.internet import reactor
class IPBusServerProtocol(DatagramProtocol, Protocol):
def datagramReceived(self, datagram, address):
print("Received udp")
self.transport.write(datagram, address)
def dataReceived(self, data):
print("Received tcp")
self.transport.write(data)
class IPBusServerFactory(ServerFactory):
protocol = IPBusServerProtocol
class Packet:
def __init__(self, data=None):
self.packet_type = 1
self.payload = b''  # the 's' struct format packs bytes, not str, in Python 3
self.structure = '!H6s'
if data is None:
return
self.packet_type, self.payload = struct.unpack(self.structure, data)
def pack(self):
return struct.pack(self.structure, self.packet_type, self.payload)
def __str__(self):
return "Type: {}\nPayload {}\n\n".format(self.packet_type, self.payload)
def main():
reactor.listenTCP(8000, IPBusServerFactory())
reactor.listenUDP(8000, IPBusServerProtocol())
reactor.run()
if __name__ == '__main__':
main()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['EventHubArgs', 'EventHub']
@pulumi.input_type
class EventHubArgs:
def __init__(__self__, *,
message_retention: pulumi.Input[int],
namespace_name: pulumi.Input[str],
partition_count: pulumi.Input[int],
resource_group_name: pulumi.Input[str],
capture_description: Optional[pulumi.Input['EventHubCaptureDescriptionArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a EventHub resource.
:param pulumi.Input[int] message_retention: Specifies the number of days to retain the events for this Event Hub.
:param pulumi.Input[str] namespace_name: Specifies the name of the EventHub Namespace. Changing this forces a new resource to be created.
:param pulumi.Input[int] partition_count: Specifies the current number of shards on the Event Hub.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventHub's parent Namespace exists. Changing this forces a new resource to be created.
:param pulumi.Input['EventHubCaptureDescriptionArgs'] capture_description: A `capture_description` block as defined below.
:param pulumi.Input[str] name: Specifies the name of the EventHub resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] status: Specifies the status of the Event Hub resource. Possible values are `Active`, `Disabled` and `SendDisabled`. Defaults to `Active`.
"""
pulumi.set(__self__, "message_retention", message_retention)
pulumi.set(__self__, "namespace_name", namespace_name)
pulumi.set(__self__, "partition_count", partition_count)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if capture_description is not None:
pulumi.set(__self__, "capture_description", capture_description)
if name is not None:
pulumi.set(__self__, "name", name)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="messageRetention")
def message_retention(self) -> pulumi.Input[int]:
"""
Specifies the number of days to retain the events for this Event Hub.
"""
return pulumi.get(self, "message_retention")
@message_retention.setter
def message_retention(self, value: pulumi.Input[int]):
pulumi.set(self, "message_retention", value)
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> pulumi.Input[str]:
"""
Specifies the name of the EventHub Namespace. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "namespace_name")
@namespace_name.setter
def namespace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace_name", value)
@property
@pulumi.getter(name="partitionCount")
def partition_count(self) -> pulumi.Input[int]:
"""
Specifies the current number of shards on the Event Hub.
"""
return pulumi.get(self, "partition_count")
@partition_count.setter
def partition_count(self, value: pulumi.Input[int]):
pulumi.set(self, "partition_count", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which the EventHub's parent Namespace exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="captureDescription")
def capture_description(self) -> Optional[pulumi.Input['EventHubCaptureDescriptionArgs']]:
"""
A `capture_description` block as defined below.
"""
return pulumi.get(self, "capture_description")
@capture_description.setter
def capture_description(self, value: Optional[pulumi.Input['EventHubCaptureDescriptionArgs']]):
pulumi.set(self, "capture_description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the EventHub resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the status of the Event Hub resource. Possible values are `Active`, `Disabled` and `SendDisabled`. Defaults to `Active`.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class _EventHubState:
def __init__(__self__, *,
capture_description: Optional[pulumi.Input['EventHubCaptureDescriptionArgs']] = None,
message_retention: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
partition_count: Optional[pulumi.Input[int]] = None,
partition_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering EventHub resources.
:param pulumi.Input['EventHubCaptureDescriptionArgs'] capture_description: A `capture_description` block as defined below.
:param pulumi.Input[int] message_retention: Specifies the number of days to retain the events for this Event Hub.
:param pulumi.Input[str] name: Specifies the name of the EventHub resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] namespace_name: Specifies the name of the EventHub Namespace. Changing this forces a new resource to be created.
:param pulumi.Input[int] partition_count: Specifies the current number of shards on the Event Hub.
:param pulumi.Input[Sequence[pulumi.Input[str]]] partition_ids: The identifiers for partitions created for Event Hubs.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventHub's parent Namespace exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] status: Specifies the status of the Event Hub resource. Possible values are `Active`, `Disabled` and `SendDisabled`. Defaults to `Active`.
"""
if capture_description is not None:
pulumi.set(__self__, "capture_description", capture_description)
if message_retention is not None:
pulumi.set(__self__, "message_retention", message_retention)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace_name is not None:
pulumi.set(__self__, "namespace_name", namespace_name)
if partition_count is not None:
pulumi.set(__self__, "partition_count", partition_count)
if partition_ids is not None:
pulumi.set(__self__, "partition_ids", partition_ids)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="captureDescription")
def capture_description(self) -> Optional[pulumi.Input['EventHubCaptureDescriptionArgs']]:
"""
A `capture_description` block as defined below.
"""
return pulumi.get(self, "capture_description")
@capture_description.setter
def capture_description(self, value: Optional[pulumi.Input['EventHubCaptureDescriptionArgs']]):
pulumi.set(self, "capture_description", value)
@property
@pulumi.getter(name="messageRetention")
def message_retention(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the number of days to retain the events for this Event Hub.
"""
return pulumi.get(self, "message_retention")
@message_retention.setter
def message_retention(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "message_retention", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the EventHub resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the EventHub Namespace. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "namespace_name")
@namespace_name.setter
def namespace_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace_name", value)
@property
@pulumi.getter(name="partitionCount")
def partition_count(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the current number of shards on the Event Hub.
"""
return pulumi.get(self, "partition_count")
@partition_count.setter
def partition_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "partition_count", value)
@property
@pulumi.getter(name="partitionIds")
def partition_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The identifiers for partitions created for Event Hubs.
"""
return pulumi.get(self, "partition_ids")
@partition_ids.setter
def partition_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "partition_ids", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which the EventHub's parent Namespace exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the status of the Event Hub resource. Possible values are `Active`, `Disabled` and `SendDisabled`. Defaults to `Active`.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
class EventHub(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
capture_description: Optional[pulumi.Input[pulumi.InputType['EventHubCaptureDescriptionArgs']]] = None,
message_retention: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
partition_count: Optional[pulumi.Input[int]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an Event Hub as a nested resource within an Event Hubs namespace.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_event_hub_namespace = azure.eventhub.EventHubNamespace("exampleEventHubNamespace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="Standard",
capacity=1,
tags={
"environment": "Production",
})
example_event_hub = azure.eventhub.EventHub("exampleEventHub",
namespace_name=example_event_hub_namespace.name,
resource_group_name=example_resource_group.name,
partition_count=2,
message_retention=1)
```
## Import
EventHubs can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:eventhub/eventHub:EventHub eventhub1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.EventHub/namespaces/namespace1/eventhubs/eventhub1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['EventHubCaptureDescriptionArgs']] capture_description: A `capture_description` block as defined below.
:param pulumi.Input[int] message_retention: Specifies the number of days to retain the events for this Event Hub.
:param pulumi.Input[str] name: Specifies the name of the EventHub resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] namespace_name: Specifies the name of the EventHub Namespace. Changing this forces a new resource to be created.
:param pulumi.Input[int] partition_count: Specifies the current number of shards on the Event Hub.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventHub's parent Namespace exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] status: Specifies the status of the Event Hub resource. Possible values are `Active`, `Disabled` and `SendDisabled`. Defaults to `Active`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: EventHubArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an Event Hub as a nested resource within an Event Hubs namespace.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_event_hub_namespace = azure.eventhub.EventHubNamespace("exampleEventHubNamespace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="Standard",
capacity=1,
tags={
"environment": "Production",
})
example_event_hub = azure.eventhub.EventHub("exampleEventHub",
namespace_name=example_event_hub_namespace.name,
resource_group_name=example_resource_group.name,
partition_count=2,
message_retention=1)
```
## Import
EventHubs can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:eventhub/eventHub:EventHub eventhub1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.EventHub/namespaces/namespace1/eventhubs/eventhub1
```
:param str resource_name: The name of the resource.
:param EventHubArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(EventHubArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
capture_description: Optional[pulumi.Input[pulumi.InputType['EventHubCaptureDescriptionArgs']]] = None,
message_retention: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
partition_count: Optional[pulumi.Input[int]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = EventHubArgs.__new__(EventHubArgs)
__props__.__dict__["capture_description"] = capture_description
if message_retention is None and not opts.urn:
raise TypeError("Missing required property 'message_retention'")
__props__.__dict__["message_retention"] = message_retention
__props__.__dict__["name"] = name
if namespace_name is None and not opts.urn:
raise TypeError("Missing required property 'namespace_name'")
__props__.__dict__["namespace_name"] = namespace_name
if partition_count is None and not opts.urn:
raise TypeError("Missing required property 'partition_count'")
__props__.__dict__["partition_count"] = partition_count
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["status"] = status
__props__.__dict__["partition_ids"] = None
super(EventHub, __self__).__init__(
'azure:eventhub/eventHub:EventHub',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
capture_description: Optional[pulumi.Input[pulumi.InputType['EventHubCaptureDescriptionArgs']]] = None,
message_retention: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
partition_count: Optional[pulumi.Input[int]] = None,
partition_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None) -> 'EventHub':
"""
Get an existing EventHub resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['EventHubCaptureDescriptionArgs']] capture_description: A `capture_description` block as defined below.
:param pulumi.Input[int] message_retention: Specifies the number of days to retain the events for this Event Hub.
:param pulumi.Input[str] name: Specifies the name of the EventHub resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] namespace_name: Specifies the name of the EventHub Namespace. Changing this forces a new resource to be created.
:param pulumi.Input[int] partition_count: Specifies the current number of shards on the Event Hub.
:param pulumi.Input[Sequence[pulumi.Input[str]]] partition_ids: The identifiers for partitions created for Event Hubs.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the EventHub's parent Namespace exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] status: Specifies the status of the Event Hub resource. Possible values are `Active`, `Disabled` and `SendDisabled`. Defaults to `Active`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _EventHubState.__new__(_EventHubState)
__props__.__dict__["capture_description"] = capture_description
__props__.__dict__["message_retention"] = message_retention
__props__.__dict__["name"] = name
__props__.__dict__["namespace_name"] = namespace_name
__props__.__dict__["partition_count"] = partition_count
__props__.__dict__["partition_ids"] = partition_ids
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["status"] = status
return EventHub(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="captureDescription")
def capture_description(self) -> pulumi.Output[Optional['outputs.EventHubCaptureDescription']]:
"""
A `capture_description` block as defined below.
"""
return pulumi.get(self, "capture_description")
@property
@pulumi.getter(name="messageRetention")
def message_retention(self) -> pulumi.Output[int]:
"""
Specifies the number of days to retain the events for this Event Hub.
"""
return pulumi.get(self, "message_retention")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the EventHub resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> pulumi.Output[str]:
"""
Specifies the name of the EventHub Namespace. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "namespace_name")
@property
@pulumi.getter(name="partitionCount")
def partition_count(self) -> pulumi.Output[int]:
"""
Specifies the current number of shards on the Event Hub.
"""
return pulumi.get(self, "partition_count")
@property
@pulumi.getter(name="partitionIds")
def partition_ids(self) -> pulumi.Output[Sequence[str]]:
"""
The identifiers for partitions created for Event Hubs.
"""
return pulumi.get(self, "partition_ids")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which the EventHub's parent Namespace exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the status of the Event Hub resource. Possible values are `Active`, `Disabled` and `SendDisabled`. Defaults to `Active`.
"""
return pulumi.get(self, "status")
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djangocms_file', '0009_fixed_null_fields'),
]
operations = [
migrations.AlterField(
model_name='file',
name='file_name',
field=models.CharField(default='', help_text='Overrides the default file name with the given value.', max_length=255, verbose_name='Name', blank=True),
preserve_default=False,
),
]
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.StateVariables.StateVariable import StateVariable
class SvVoltage(StateVariable):
"""State variable for voltage.State variable for voltage.
"""
def __init__(self, angle=0.0, v=0.0, TopologicalNode=None, *args, **kw_args):
"""Initialises a new 'SvVoltage' instance.
@param angle: The voltage angle in radians of the topological node.
@param v: The voltage magnitude of the topological node.
@param TopologicalNode: The topological node associated with the voltage state.
"""
#: The voltage angle in radians of the topological node.
self.angle = angle
#: The voltage magnitude of the topological node.
self.v = v
self._TopologicalNode = None
self.TopologicalNode = TopologicalNode
super(SvVoltage, self).__init__(*args, **kw_args)
_attrs = ["angle", "v"]
_attr_types = {"angle": float, "v": float}
_defaults = {"angle": 0.0, "v": 0.0}
_enums = {}
_refs = ["TopologicalNode"]
_many_refs = []
def getTopologicalNode(self):
"""The topological node associated with the voltage state.
"""
return self._TopologicalNode
def setTopologicalNode(self, value):
if self._TopologicalNode is not None:
self._TopologicalNode._SvVoltage = None
self._TopologicalNode = value
if self._TopologicalNode is not None:
self._TopologicalNode.SvVoltage = None
self._TopologicalNode._SvVoltage = self
TopologicalNode = property(getTopologicalNode, setTopologicalNode)
|
# %%
import numpy as np
import json
# from nilearn import plotting
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from ieeg.auth import Session
import scipy
import csv
import pandas as pd
import os
from scipy.io import savemat, loadmat
from os.path import join as ospj
import sys
import warnings
warnings.filterwarnings("ignore")
sys.path.append('tools')
from pull_patient_localization import pull_patient_localization
# Read credentials and start iEEG session
with open('../credentials.json') as f:
credentials = json.load(f)
username = credentials['username']
password = credentials['password']
s = Session(username, password)
# Get paths from config file and metadata
with open("config.json") as f:
config = json.load(f)
repo_path = config['repositoryPath']
metadata_path = config['metadataPath']
data_path = ospj(repo_path, 'data')
# %%
patients, labels, ignore, resect, gm_wm, coords, region, soz = pull_patient_localization(ospj(data_path, 'patient_localization_final.mat'))
# %% Load metadata file
metadata = pd.read_excel(os.path.join(metadata_path, 'atlas_metadata_simplified.xlsx'))
electrodes = config['electrodes']
# def identify_channels(mode='all'):
# for patient in metadata['Patient']:
for patient in patients:
localization_ind = patients.index(patient)
iEEG_filename = metadata[metadata['Patient'] == patient]['portal_ID'].item()
pt_labels = labels[localization_ind]
pt_ignore = ignore[localization_ind].T[0]
pt_resect = resect[localization_ind].T[0]
pt_gm_wm = gm_wm[localization_ind].T[0]
# Set up region list
pt_region = []
for i in region[localization_ind]:
if len(i[0]) == 0:
pt_region.append('')
else:
pt_region.append(i[0][0])
pt_soz = soz[localization_ind].T[0]
df_data = {
'labels': pt_labels,
'ignore': pt_ignore,
'resect': pt_resect,
'gm_wm': pt_gm_wm,
'region': pt_region,
'soz': pt_soz
}
print("Starting pipeline for {0}, iEEG filename is {1}".format(patient, iEEG_filename))
df = pd.DataFrame(df_data).reset_index()
df_filtered = df[df['ignore'] != 1]
if electrodes == "regions":
# Take first electrode in each region
df_filtered = df_filtered.groupby("region").first()
# Remove white matter and non-localized electrodes
df_filtered = df_filtered[df_filtered['gm_wm'] != -1]
# Sort rows in alphabetical order by electrode name, easier to read with iEEG.org
df_filtered.sort_values(by=['labels'], inplace=True)
if electrodes == "regions":
mdic = {
"iEEGFilename": iEEG_filename,
"targetElectrodesRegionInds": np.array(df_filtered['index']), # +1 for one-indexing in MATLAB
"Regions": list(df_filtered.index),
"electrodeNames": list(df_filtered['labels'])
}
else:
mdic = {
"iEEGFilename": iEEG_filename,
"targetElectrodesRegionInds": np.array(df_filtered['index']), # +1 for one-indexing in MATLAB
"Regions": list(df_filtered['region']),
"electrodeNames": list(df_filtered['labels'])
}
patient_data_path = os.path.join(data_path, patient)
if not os.path.exists(patient_data_path):
os.makedirs(patient_data_path)
save_path = os.path.join(patient_data_path, "selected_electrodes_elec-{}.mat".format(electrodes))
savemat(save_path, mdic)
print("\t{} has {} channels after filtering".format(patient, len(mdic['Regions'])))
print("\tResults are saved in {}".format(save_path))
# %%
|
import numpy as np
import os
import glob
import argparse
import cv2
import pandas as pd
from metric import spatial_accuracy, temporal_accuracy
from utils import denormalize, normalize, draw_pnt, sample_cnt
IMAGE_FOLDER = 'images'
POINT_FOLDER = 'points'
INDEX_FNAME = 'indices.txt'
if __name__ == "__main__":
args = argparse.ArgumentParser()
args.add_argument('--data_path', default='./data/PoST', type=str, help='directory to dataset')
args.add_argument('--result_path', required=True, type=str, help='directory to result')
args.add_argument('--output_path', default='./outputs/PoST', type=str, help='directory to outputs')
args.add_argument('--threshs', nargs='+', default=[0.16, 0.08, 0.04], type=float, help='thresholds to evaluate')
args.add_argument('--visualize', action='store_true', help='visualize the results')
opt = args.parse_args()
data_path = opt.data_path
result_path = opt.result_path
visualize = opt.visualize
threshs = opt.threshs
output_path = opt.output_path
exp_name = os.path.basename(result_path)
output_path = os.path.join(output_path, exp_name)
os.makedirs(output_path, exist_ok=True)
seqs = [d for d in os.listdir(data_path) if not d.startswith('.')]
seqs = sorted(seqs)
sp_acc_all = {}
tp_acc_all = {}
for i, seq_name in enumerate(seqs):
print(f'({i+1:02d}/{len(seqs):02d}) processing sequence:{seq_name}...')
if visualize:
viz_path = os.path.join(output_path, 'visuals', seq_name)
os.makedirs(viz_path, exist_ok=True)
# setting accuracy dictionaries
sp_acc_all[seq_name] = {f'th:{thresh}':[] for thresh in threshs}
tp_acc_all[seq_name] = {f'th:{thresh}':[] for thresh in threshs}
# setting path
seq_path = os.path.join(data_path, seq_name)
img_dir = os.path.join(seq_path, IMAGE_FOLDER)
pnt_dir = os.path.join(seq_path, POINT_FOLDER)
idx_path = os.path.join(seq_path, INDEX_FNAME)
res_dir = os.path.join(result_path, seq_name)
# image and point list
img_list = sorted(os.listdir(img_dir))
pnt_list = sorted(os.listdir(pnt_dir))
idx = np.loadtxt(idx_path, dtype=int)
# anchor image
img0_path = os.path.join(img_dir, img_list[0])
img0 = cv2.imread(img0_path)
height, width = img0.shape[:2]
# path to init points
init_path = os.path.join(seq_path, 'init.txt')
gt_path = os.path.join(pnt_dir, pnt_list[0])
res_path = os.path.join(res_dir, pnt_list[0])
# set inital points
init = np.loadtxt(init_path)
gt = np.loadtxt(gt_path)
if os.path.exists(res_path):
res = np.loadtxt(res_path)
else:
res = np.loadtxt(gt_path)
# if new index is needed, preprocess it
if exp_name == 'Ours':
idx = sample_cnt(init, idx, len(res))
# to calculate temporal accuracy
prev_gt = None
prev_res = None
prev_keep = None
# eval
for pnt_fname in pnt_list[1:]:
gt_path = os.path.join(pnt_dir, pnt_fname)
res_path = os.path.join(res_dir, pnt_fname)
# load gt and build mask for keep (-1 for no point to eval)
gt = np.loadtxt(gt_path)
keep = gt[...,0] >= 0
# load result
if os.path.exists(res_path):
res = np.loadtxt(res_path)
else:
print(f'[WARNING] {res_path} is not found, using previous point to calculate')
if res is None:
continue
# sample points to eval
if len(res) != len(gt):
res = res[idx]
# normalize
gt = normalize(gt, width=width, height=height)
if np.sum(res > 1.) > 0:
res = normalize(res, width=width, height=height)
# calculate accuracy for each threshold in the sequence
for thresh in threshs:
sp_acc = spatial_accuracy(gt[keep], res[keep], thresh)
sp_acc_all[seq_name][f'th:{thresh}'].append(sp_acc)
if prev_gt is not None:
keep_both = np.logical_and(keep, prev_keep)
tp_acc = temporal_accuracy(gt[keep_both], res[keep_both],
prev_gt[keep_both], prev_res[keep_both],
thresh)
tp_acc_all[seq_name][f'th:{thresh}'].append(tp_acc)
# to calculate temporal accuracy
prev_gt = gt
prev_res = res
prev_keep = keep
# visualization
if visualize:
img_fname = pnt_fname.replace('.txt', '.jpg')
img_path = os.path.join(img_dir, img_fname)
img = cv2.imread(img_path)
height, width = img.shape[:2]
res_denorm = denormalize(res, width, height)
img = draw_pnt(img, res_denorm)
save_path = os.path.join(viz_path, img_fname)
cv2.imwrite(save_path, img)
# calculate mean of each sequence
for thresh in threshs:
sp_acc_all[seq_name][f'th:{thresh}'] = np.mean(sp_acc_all[seq_name][f'th:{thresh}'])
tp_acc_all[seq_name][f'th:{thresh}'] = np.mean(tp_acc_all[seq_name][f'th:{thresh}'])
# calculate mean for all
sp_acc_all['mean'] = {}
tp_acc_all['mean'] = {}
for thresh in threshs:
sp_accs = [ sp_acc_all[seq_name][f'th:{thresh}'] for seq_name in sp_acc_all if seq_name != 'mean']
tp_accs = [ tp_acc_all[seq_name][f'th:{thresh}'] for seq_name in tp_acc_all if seq_name != 'mean']
sp_acc_all['mean'][f'th:{thresh}'] = np.mean(sp_accs)
tp_acc_all['mean'][f'th:{thresh}'] = np.mean(tp_accs)
sp_acc_df = pd.DataFrame(sp_acc_all).round(3)
tp_acc_df = pd.DataFrame(tp_acc_all).round(3)
print(sp_acc_df)
print(tp_acc_df)
sp_save_path = os.path.join(output_path, 'spatial_accuracy.csv')
tp_save_path = os.path.join(output_path, 'temporal_accuracy.csv')
sp_acc_df.to_csv(sp_save_path)
tp_acc_df.to_csv(tp_save_path)
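# Example invocation (illustrative; the script name and paths are placeholders):
#   python evaluate_post.py --data_path ./data/PoST --result_path ./results/Ours --visualize
# With these arguments the summaries are written to ./outputs/PoST/Ours as
# spatial_accuracy.csv and temporal_accuracy.csv, plus per-frame renderings under
# ./outputs/PoST/Ours/visuals/<sequence>/ because --visualize is set.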
|
from .misc import (ProgressDialog, ProjectDialog, AlignmentErrorDialog, SampleExportDialog,
SimulationDialog, ScriptExportDialog, PathLengthPlotter, AboutDialog, CalibrationErrorDialog)
from .preferences import Preferences
from .insert import InsertPrimitiveDialog, InsertPointDialog, InsertVectorDialog, PickPointDialog, AlignSample
from .managers import SampleManager, PointManager, VectorManager, JawControl, PositionerControl, DetectorControl
from .tools import TransformDialog
|
import logging
from collections import defaultdict, OrderedDict
import gym
from gym import spaces
from rware.utils import MultiAgentActionSpace, MultiAgentObservationSpace
from enum import Enum
import numpy as np
from typing import List, Tuple, Optional, Dict
import networkx as nx
_AXIS_Z = 0
_AXIS_Y = 1
_AXIS_X = 2
_COLLISION_LAYERS = 2
_LAYER_AGENTS = 0
_LAYER_SHELFS = 1
class _VectorWriter:
def __init__(self, size: int):
self.vector = np.zeros(size, dtype=np.float32)
self.idx = 0
def write(self, data):
data_size = len(data)
self.vector[self.idx : self.idx + data_size] = data
self.idx += data_size
def skip(self, bits):
self.idx += bits
class Action(Enum):
NOOP = 0
FORWARD = 1
LEFT = 2
RIGHT = 3
TOGGLE_LOAD = 4
class Direction(Enum):
UP = 0
DOWN = 1
LEFT = 2
RIGHT = 3
class RewardType(Enum):
GLOBAL = 0
INDIVIDUAL = 1
TWO_STAGE = 2
class Entity:
def __init__(self, id_: int, x: int, y: int):
self.id = id_
self.prev_x = None
self.prev_y = None
self.x = x
self.y = y
class Agent(Entity):
counter = 0
def __init__(self, x: int, y: int, dir_: Direction, msg_bits: int):
Agent.counter += 1
super().__init__(Agent.counter, x, y)
self.dir = dir_
self.message = np.zeros(msg_bits)
self.req_action: Optional[Action] = None
self.carrying_shelf: Optional[Shelf] = None
self.canceled_action = None
self.has_delivered = False
@property
def collision_layers(self):
if self.loaded:
return (_LAYER_AGENTS, _LAYER_SHELFS)
else:
return (_LAYER_AGENTS,)
def req_location(self, grid_size) -> Tuple[int, int]:
if self.req_action != Action.FORWARD:
return self.x, self.y
elif self.dir == Direction.UP:
return self.x, max(0, self.y - 1)
elif self.dir == Direction.DOWN:
return self.x, min(grid_size[0] - 1, self.y + 1)
elif self.dir == Direction.LEFT:
return max(0, self.x - 1), self.y
elif self.dir == Direction.RIGHT:
return min(grid_size[1] - 1, self.x + 1), self.y
raise ValueError(
f"Direction is {self.dir}. Should be one of {[v for v in Direction]}"
)
def req_direction(self) -> Direction:
wraplist = [Direction.UP, Direction.RIGHT, Direction.DOWN, Direction.LEFT]
if self.req_action == Action.RIGHT:
return wraplist[(wraplist.index(self.dir) + 1) % len(wraplist)]
elif self.req_action == Action.LEFT:
return wraplist[(wraplist.index(self.dir) - 1) % len(wraplist)]
else:
return self.dir
class Shelf(Entity):
counter = 0
def __init__(self, x, y):
Shelf.counter += 1
super().__init__(Shelf.counter, x, y)
@property
def collision_layers(self):
return (_LAYER_SHELFS,)
class Warehouse(gym.Env):
metadata = {"render.modes": ["human", "rgb_array"]}
def __init__(
self,
shelf_columns: int,
column_height: int,
shelf_rows: int,
n_agents: int,
msg_bits: int,
sensor_range: int,
request_queue_size: int,
max_inactivity_steps: Optional[int],
max_steps: Optional[int],
reward_type: RewardType,
fast_obs=True,
):
"""The robotic warehouse environment
Creates a grid world where multiple agents (robots)
are supposed to collect shelfs, bring them to a goal
and then return them.
        .. note::
The grid looks like this:
shelf
columns
vv
----------
-XX-XX-XX- ^
-XX-XX-XX- Column Height
-XX-XX-XX- v
----------
-XX----XX- <\
-XX----XX- <- Shelf Rows
-XX----XX- </
----------
----GG----
            G: the goal positions, where agents are rewarded for
            delivering the requested shelfs.
The final grid size will be
height: (column_height + 1) * shelf_rows + 2
width: (2 + 1) * shelf_columns + 1
The bottom-middle column will be removed to allow for
robot queuing next to the goal locations
:param shelf_columns: Number of columns in the warehouse
:type shelf_columns: int
:param column_height: Column height in the warehouse
:type column_height: int
        :param shelf_rows: Number of shelf rows in the warehouse
:type shelf_rows: int
:param n_agents: Number of spawned and controlled agents
:type n_agents: int
:param msg_bits: Number of communication bits for each agent
:type msg_bits: int
:param sensor_range: Range of each agents observation
:type sensor_range: int
:param request_queue_size: How many shelfs are simultaneously requested
:type request_queue_size: int
        :param max_inactivity_steps: Number of steps without a delivered shelf until the environment finishes
        :type max_inactivity_steps: Optional[int]
        :param max_steps: Maximum number of steps before the environment finishes
        :type max_steps: Optional[int]
:param reward_type: Specifies if agents are rewarded individually or globally
:type reward_type: RewardType
"""
assert shelf_columns % 2 == 1, "Only odd number of shelf columns is supported"
self.grid_size = (
(column_height + 1) * shelf_rows + 2,
(2 + 1) * shelf_columns + 1,
)
self.n_agents = n_agents
self.msg_bits = msg_bits
self.column_height = column_height
self.sensor_range = sensor_range
self.max_inactivity_steps: Optional[int] = max_inactivity_steps
self.reward_type = reward_type
self.reward_range = (0, 1)
self._cur_inactive_steps = None
self._cur_steps = 0
self.max_steps = max_steps
self.grid = np.zeros((_COLLISION_LAYERS, *self.grid_size), dtype=np.int32)
sa_action_space = [len(Action), *msg_bits * (2,)]
if len(sa_action_space) == 1:
sa_action_space = spaces.Discrete(sa_action_space[0])
else:
sa_action_space = spaces.MultiDiscrete(sa_action_space)
self.action_space = spaces.Tuple(tuple(n_agents * [sa_action_space]))
self.request_queue_size = request_queue_size
self.request_queue = []
self.agents: List[Agent] = []
self.goals: List[Tuple[int, int]] = [
(self.grid_size[1] // 2 - 1, self.grid_size[0] - 1),
(self.grid_size[1] // 2, self.grid_size[0] - 1),
]
self._obs_bits_for_self = 4 + len(Direction)
self._obs_bits_per_agent = 1 + len(Direction) + self.msg_bits
self._obs_bits_per_shelf = 2
self._obs_bits_for_requests = 2
self._obs_sensor_locations = (1 + 2 * self.sensor_range) ** 2
self._obs_length = (
self._obs_bits_for_self
+ self._obs_sensor_locations * self._obs_bits_per_agent
+ self._obs_sensor_locations * self._obs_bits_per_shelf
)
# default values:
self.fast_obs = None
self.observation_space = None
self._use_slow_obs()
# for performance reasons we
# can flatten the obs vector
if fast_obs:
self._use_fast_obs()
self.renderer = None
def _use_slow_obs(self):
self.fast_obs = False
self.observation_space = spaces.Tuple(
tuple(
[
spaces.Dict(
OrderedDict(
{
"self": spaces.Dict(
OrderedDict(
{
"location": spaces.Box(
low=0,
high=np.array(
[
self.grid_size[1],
self.grid_size[0],
]
),
dtype=int,
),
"carrying_shelf": spaces.MultiBinary(1),
"direction": spaces.Discrete(4),
"on_highway": spaces.MultiBinary(1),
}
)
),
"sensors": spaces.Tuple(
self._obs_sensor_locations
* (
spaces.Dict(
OrderedDict(
{
"has_agent": spaces.MultiBinary(1),
"direction": spaces.Discrete(4),
"local_message": spaces.MultiBinary(
self.msg_bits
),
"has_shelf": spaces.MultiBinary(1),
"shelf_requested": spaces.MultiBinary(
1
),
}
)
),
)
),
}
)
)
for _ in range(self.n_agents)
]
)
)
def _use_fast_obs(self):
if self.fast_obs:
return
self.fast_obs = True
ma_spaces = []
for sa_obs in self.observation_space:
flatdim = spaces.flatdim(sa_obs)
ma_spaces += [
spaces.Box(
low=-float("inf"),
high=float("inf"),
shape=(flatdim,),
dtype=np.float32,
)
]
self.observation_space = spaces.Tuple(tuple(ma_spaces))
def _is_highway(self, x: int, y: int) -> bool:
return (
(x % 3 == 0) # vertical highways
or (y % (self.column_height + 1) == 0) # horizontal highways
or (y == self.grid_size[0] - 1) # delivery row
or ( # remove a box for queuing
(y > self.grid_size[0] - (self.column_height + 3))
and ((x == self.grid_size[1] // 2 - 1) or (x == self.grid_size[1] // 2))
)
)
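        # Example (illustrative, using the __main__ demo's column_height=8 and
        # grid_size == (29, 28)): (x=3, y=5) is a highway because x % 3 == 0
        # (a vertical corridor), whereas (x=4, y=5) lies inside a shelf block
        # and this method returns False for it.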
def _make_obs(self, agent):
y_scale, x_scale = self.grid_size[0] - 1, self.grid_size[1] - 1
min_x = agent.x - self.sensor_range
max_x = agent.x + self.sensor_range + 1
min_y = agent.y - self.sensor_range
max_y = agent.y + self.sensor_range + 1
# sensors
if (
(min_x < 0)
or (min_y < 0)
or (max_x > self.grid_size[1])
or (max_y > self.grid_size[0])
):
padded_agents = np.pad(
self.grid[_LAYER_AGENTS], self.sensor_range, mode="constant"
)
padded_shelfs = np.pad(
self.grid[_LAYER_SHELFS], self.sensor_range, mode="constant"
)
# + self.sensor_range due to padding
min_x += self.sensor_range
max_x += self.sensor_range
min_y += self.sensor_range
max_y += self.sensor_range
else:
padded_agents = self.grid[_LAYER_AGENTS]
padded_shelfs = self.grid[_LAYER_SHELFS]
agents = padded_agents[min_y:max_y, min_x:max_x].reshape(-1)
shelfs = padded_shelfs[min_y:max_y, min_x:max_x].reshape(-1)
if self.fast_obs:
obs = _VectorWriter(self.observation_space[agent.id - 1].shape[0])
obs.write([agent.x, agent.y, int(agent.carrying_shelf is not None)])
direction = np.zeros(4)
direction[agent.dir.value] = 1.0
obs.write(direction)
obs.write([int(self._is_highway(agent.x, agent.y))])
for i, (id_agent, id_shelf) in enumerate(zip(agents, shelfs)):
if id_agent == 0:
obs.skip(1)
obs.write([1.0])
obs.skip(3 + self.msg_bits)
else:
obs.write([1.0])
direction = np.zeros(4)
direction[self.agents[id_agent - 1].dir.value] = 1.0
obs.write(direction)
if self.msg_bits > 0:
obs.write(self.agents[id_agent - 1].message)
if id_shelf == 0:
obs.skip(2)
else:
obs.write(
[1.0, int(self.shelfs[id_shelf - 1] in self.request_queue)]
)
return obs.vector
# --- self data
obs = {}
obs["self"] = {
"location": np.array([agent.x, agent.y]),
"carrying_shelf": [int(agent.carrying_shelf is not None)],
"direction": agent.dir.value,
"on_highway": [int(self._is_highway(agent.x, agent.y))],
}
# --- sensor data
obs["sensors"] = tuple({} for _ in range(self._obs_sensor_locations))
# find neighboring agents
for i, id_ in enumerate(agents):
if id_ == 0:
obs["sensors"][i]["has_agent"] = [0]
obs["sensors"][i]["direction"] = 0
obs["sensors"][i]["local_message"] = self.msg_bits * [0]
else:
obs["sensors"][i]["has_agent"] = [1]
obs["sensors"][i]["direction"] = self.agents[id_ - 1].dir.value
obs["sensors"][i]["local_message"] = self.agents[id_ - 1].message
# find neighboring shelfs:
for i, id_ in enumerate(shelfs):
if id_ == 0:
obs["sensors"][i]["has_shelf"] = [0]
obs["sensors"][i]["shelf_requested"] = [0]
else:
obs["sensors"][i]["has_shelf"] = [1]
obs["sensors"][i]["shelf_requested"] = [
int(self.shelfs[id_ - 1] in self.request_queue)
]
return obs
def _recalc_grid(self):
self.grid[:] = 0
for s in self.shelfs:
self.grid[_LAYER_SHELFS, s.y, s.x] = s.id
for a in self.agents:
self.grid[_LAYER_AGENTS, a.y, a.x] = a.id
def reset(self):
Shelf.counter = 0
Agent.counter = 0
self._cur_inactive_steps = 0
self._cur_steps = 0
# n_xshelf = (self.grid_size[1] - 1) // 3
# n_yshelf = (self.grid_size[0] - 2) // 9
# make the shelfs
self.shelfs = [
Shelf(x, y)
for y, x in zip(
np.indices(self.grid_size)[0].reshape(-1),
np.indices(self.grid_size)[1].reshape(-1),
)
if not self._is_highway(x, y)
]
# spawn agents at random locations
agent_locs = np.random.choice(
np.arange(self.grid_size[0] * self.grid_size[1]),
size=self.n_agents,
replace=False,
)
agent_locs = np.unravel_index(agent_locs, self.grid_size)
# and direction
agent_dirs = np.random.choice([d for d in Direction], size=self.n_agents)
self.agents = [
Agent(x, y, dir_, self.msg_bits)
for y, x, dir_ in zip(*agent_locs, agent_dirs)
]
self._recalc_grid()
self.request_queue = list(
np.random.choice(self.shelfs, size=self.request_queue_size, replace=False)
)
return tuple([self._make_obs(agent) for agent in self.agents])
# for s in self.shelfs:
# self.grid[0, s.y, s.x] = 1
# print(self.grid[0])
def step(
self, actions: List[Action]
) -> Tuple[List[np.ndarray], List[float], List[bool], Dict]:
assert len(actions) == len(self.agents)
for agent, action in zip(self.agents, actions):
if self.msg_bits > 0:
agent.req_action = Action(action[0])
agent.message[:] = action[1:]
else:
agent.req_action = Action(action)
# # stationary agents will certainly stay where they are
# stationary_agents = [agent for agent in self.agents if agent.action != Action.FORWARD]
# # forward agents will move only if they avoid collisions
# forward_agents = [agent for agent in self.agents if agent.action == Action.FORWARD]
commited_agents = set()
G = nx.DiGraph()
for agent in self.agents:
start = agent.x, agent.y
target = agent.req_location(self.grid_size)
if (
agent.carrying_shelf
and start != target
and self.grid[_LAYER_SHELFS, target[1], target[0]]
and not (
self.grid[_LAYER_AGENTS, target[1], target[0]]
and self.agents[
self.grid[_LAYER_AGENTS, target[1], target[0]] - 1
].carrying_shelf
)
):
# there's a standing shelf at the target location
# our agent is carrying a shelf so there's no way
# this movement can succeed. Cancel it.
agent.req_action = Action.NOOP
G.add_edge(start, start)
else:
G.add_edge(start, target)
wcomps = [G.subgraph(c).copy() for c in nx.weakly_connected_components(G)]
for comp in wcomps:
try:
# if we find a cycle in this component we have to
# commit all nodes in that cycle, and nothing else
cycle = nx.algorithms.find_cycle(comp)
if len(cycle) == 2:
# we have a situation like this: [A] <-> [B]
# which is physically impossible. so skip
continue
for edge in cycle:
start_node = edge[0]
agent_id = self.grid[_LAYER_AGENTS, start_node[1], start_node[0]]
if agent_id > 0:
commited_agents.add(agent_id)
except nx.NetworkXNoCycle:
longest_path = nx.algorithms.dag_longest_path(comp)
for x, y in longest_path:
agent_id = self.grid[_LAYER_AGENTS, y, x]
if agent_id:
commited_agents.add(agent_id)
commited_agents = set([self.agents[id_ - 1] for id_ in commited_agents])
failed_agents = set(self.agents) - commited_agents
for agent in failed_agents:
assert agent.req_action == Action.FORWARD
agent.req_action = Action.NOOP
rewards = np.zeros(self.n_agents)
for agent in self.agents:
agent.prev_x, agent.prev_y = agent.x, agent.y
if agent.req_action == Action.FORWARD:
agent.x, agent.y = agent.req_location(self.grid_size)
if agent.carrying_shelf:
agent.carrying_shelf.x, agent.carrying_shelf.y = agent.x, agent.y
elif agent.req_action in [Action.LEFT, Action.RIGHT]:
agent.dir = agent.req_direction()
elif agent.req_action == Action.TOGGLE_LOAD and not agent.carrying_shelf:
shelf_id = self.grid[_LAYER_SHELFS, agent.y, agent.x]
if shelf_id:
agent.carrying_shelf = self.shelfs[shelf_id - 1]
elif agent.req_action == Action.TOGGLE_LOAD and agent.carrying_shelf:
if not self._is_highway(agent.x, agent.y):
agent.carrying_shelf = None
if agent.has_delivered and self.reward_type == RewardType.TWO_STAGE:
rewards[agent.id - 1] += 0.5
agent.has_delivered = False
self._recalc_grid()
shelf_delivered = False
for x, y in self.goals:
shelf_id = self.grid[_LAYER_SHELFS, y, x]
if not shelf_id:
continue
shelf = self.shelfs[shelf_id - 1]
if shelf not in self.request_queue:
continue
            # a shelf was successfully delivered.
shelf_delivered = True
# remove from queue and replace it
new_request = np.random.choice(
list(set(self.shelfs) - set(self.request_queue))
)
self.request_queue[self.request_queue.index(shelf)] = new_request
# also reward the agents
if self.reward_type == RewardType.GLOBAL:
rewards += 1
elif self.reward_type == RewardType.INDIVIDUAL:
agent_id = self.grid[_LAYER_AGENTS, y, x]
rewards[agent_id - 1] += 1
elif self.reward_type == RewardType.TWO_STAGE:
agent_id = self.grid[_LAYER_AGENTS, y, x]
self.agents[agent_id - 1].has_delivered = True
rewards[agent_id - 1] += 0.5
if shelf_delivered:
self._cur_inactive_steps = 0
else:
self._cur_inactive_steps += 1
self._cur_steps += 1
if (
self.max_inactivity_steps
and self._cur_inactive_steps >= self.max_inactivity_steps
) or (self.max_steps and self._cur_steps >= self.max_steps):
dones = self.n_agents * [True]
else:
dones = self.n_agents * [False]
new_obs = tuple([self._make_obs(agent) for agent in self.agents])
info = {}
return new_obs, list(rewards), dones, info
def render(self, mode="human"):
if not self.renderer:
from rware.rendering import Viewer
self.renderer = Viewer(self.grid_size)
return self.renderer.render(self, return_rgb_array=mode == "rgb_array")
def close(self):
if self.renderer:
self.renderer.close()
def seed(self, seed=None):
...
if __name__ == "__main__":
env = Warehouse(9, 8, 3, 10, 3, 1, 5, None, None, RewardType.GLOBAL)
env.reset()
import time
from tqdm import tqdm
time.sleep(2)
# env.render()
# env.step(18 * [Action.LOAD] + 2 * [Action.NOOP])
for _ in tqdm(range(1000000)):
# time.sleep(2)
# env.render()
actions = env.action_space.sample()
env.step(actions)
|
# -*- coding: utf-8 -*-
from brian2 import *
defaultclock.dt = 0.01*ms
eq_IB_bd='''
dV/dt=1/C_IB_bd*(-J-Isyn-Igap-Iran-Iapp-IL-INa-IK-IAR-IKM-ICaH) : volt
J : amp * meter ** -2
Isyn=IsynRS_LIP_sup+IsynFS_LIP_sup+IsynSI_LIP_sup+IsynRS_LIP_gran+IsynFS_LIP_gran+IsynIB_LIP+IsynSI_LIP_deep+Isyn_FEF+Isyn_mdPul : amp * meter ** -2
IsynRS_LIP_sup : amp * meter ** -2
IsynFS_LIP_sup : amp * meter ** -2
IsynSI_LIP_sup : amp * meter ** -2
IsynRS_LIP_gran : amp * meter ** -2
IsynFS_LIP_gran : amp * meter ** -2
IsynIB_LIP : amp * meter ** -2
IsynSI_LIP_deep : amp * meter ** -2
Isyn_FEF : amp * meter ** -2
Isyn_mdPul : amp * meter ** -2
Igap = Igap_soma+Igap_axon+Igap_ad+Igap_bd : amp * meter ** -2
Igap_soma : amp * meter ** -2
Igap_axon : amp * meter ** -2
Igap_ad : amp * meter ** -2
Igap_bd : amp * meter ** -2
IL=gL_IB_bd*(V-VL_IB_bd) : amp * meter ** -2
INa=gNa_IB_bd*m0**3*h*(V-VNa_IB_bd) : amp * meter ** -2
m0=1/(1+exp((-V-34.5*mV)/10/mV)) : 1
dh/dt=1/tauh*(hinf-h) : 1
hinf=1/(1+exp((V+59.4*mV)/10.7/mV)) : 1
tauh=0.15*ms+1.15*ms/(1+exp((V+33.5*mV)/15/mV)) : second
IK=gK_IB_bd*m**4*(V-VK_IB_bd) : amp * meter ** -2
dm/dt=1/taum*(minf-m) : 1
minf=1/(1+exp((-V-29.5*mV)/10/mV)) : 1
taum=0.25*ms+4.35*ms*exp(-abs(V+10*mV)/10/mV) : second
IAR=gAR_IB_bd*mAR*(V-VAR_IB_bd) : amp * meter ** -2
dmAR/dt=1/taumAR*(mARinf-mAR) : 1
mARinf=1/(1+exp((V+75*mV)/5.5/mV)) : 1
taumAR=1*ms/(exp((-14.6*mV-0.086*V)/mV)+exp((-1.87*mV+0.07*V)/mV)) : second
IKM=gKM_IB_bd*mKM*(V-VKM_IB_bd) : amp * meter ** -2
dmKM/dt=alphaKM*(1-mKM)-betaKM*mKM : 1
alphaKM= 0.02/(1+exp((-V-20*mV)/5/mV))/ms : hertz
betaKM= 0.01*exp((-V-43*mV)/18/mV)/ms: hertz
ICaH=gCaH_IB_bd*mKM**2*(V-VCaH_IB_bd) : amp * meter ** -2
dmCaH/dt=alphaCaH*(1-mCaH)-betaCaH*mCaH : 1
alphaCaH= 1.6/(1+exp(-0.072*(-V-5*mV)/mV))/ms : hertz
betaCaH= 0.02/ms*(V+8.9*mV)/mV/(exp((V+8.9*mV)/5/mV)-1): hertz
Iran=sig_ranIB_bd*randn(): amp * meter ** -2 (constant over dt)
Iapp=sinp*ginp_IB*(V-Vrev_inp) : amp * meter ** -2
dsinp/dt=-sinp/taudinp2 + (1-sinp)/taurinp2*0.5*(1+tanh(Vinp/10/mV)) : 1
dVinp/dt=1/tauinp2*(Vlow-Vinp) : volt
ginp_IB : siemens * meter **-2
'''
## Constants
C_IB_bd = 0.9* ufarad * cm ** -2
gL_IB_bd=2 * msiemens * cm **-2
VL_IB_bd=-70*mV
gNa_IB_bd=125 * msiemens * cm **-2
VNa_IB_bd=50*mV
gK_IB_bd=10 * msiemens * cm **-2
VK_IB_bd=-95*mV
gAR_IB_bd=115 * msiemens * cm **-2
VAR_IB_bd=-25*mV
gKM_IB_bd=0.75 * msiemens * cm **-2
VKM_IB_bd=-95*mV
gCaH_IB_bd=6.5 * msiemens * cm **-2
VCaH_IB_bd=125*mV
sig_ranIB_bd=0.005* mamp * cm **-2
sig_ranIB_bd=0.005* mamp * cm **-2*0.5
taurinp2=0.1*ms
taudinp2=0.5*ms
tauinp2=taudinp2
if __name__=='__main__' :
start_scope()
IB_bd=NeuronGroup(1,eq_IB_bd,threshold='V>-20*mvolt',refractory=3*ms,method='rk4')
IB_bd.V = '-100*mvolt+10*rand()*mvolt'
IB_bd.h = '0+0.05*rand()'
IB_bd.m = '0+0.05*rand()'
IB_bd.mAR = '0+0.001*rand()'
IB_bd.mKM = '0+0.05*rand()'
IB_bd.mCaH = '0+0.01*rand()'
IB_bd.J='-13 * uA * cmeter ** -2'
V1=StateMonitor(IB_bd,'V',record=[0])
# I1=StateMonitor(IB_bd,'IL',record=[0])
# I2=StateMonitor(IB_bd,'INa',record=[0])
# I3=StateMonitor(IB_bd,'IK',record=[0])
# I4=StateMonitor(IB_bd,'IAR',record=[0])
run(1*second)
figure()
plot(V1.t/second,V1.V[0]/volt)
xlabel('Time (s)')
ylabel('Membrane potential (V)')
title('IB_bd cell')
# figure()
# plot(I1.t/second,I1.IL[0],label='L')
# plot(I1.t/second,I2.INa[0],label='Na')
# plot(I1.t/second,I3.IK[0],label='K')
# plot(I1.t/second,I4.IAR[0],label='AR')
# title('Synaptic currents')
# legend()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 pavle <pavle.portic@tilda.center>
#
# Distributed under terms of the BSD-3-Clause license.
from os import environ
DB_URI = environ.get('DB_URI', None)
if DB_URI is None:
DB_ENGINE = environ.get('DB_ENGINE', 'postgresql')
DB_USER = environ.get('DB_USER', 'root')
DB_PASSWORD = environ.get('DB_PASSWORD', None)
DB_HOST = environ.get('DB_HOST', 'localhost')
DB_PORT = environ.get('DB_PORT', '5432')
    DB_NAME = environ.get('DB_NAME', None)
    # Build the fallback URI here so the individual DB_* names are only referenced
    # when they are actually defined; evaluating the f-string unconditionally inside
    # BaseConfig raised a NameError whenever DB_URI was provided directly.
    DB_URI = f'{DB_ENGINE}://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}'
class BaseConfig:
DEBUG = False
TESTING = False
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_RECORD_QUERIES = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_DATABASE_URI = DB_URI
JWT_SECRET_KEY = None
class DevConfig(BaseConfig):
DEBUG = True
TESTING = True
JWT_SECRET_KEY = 'default-secret'
class TestConfig(BaseConfig):
DEBUG = True
TESTING = True
JWT_SECRET_KEY = environ.get('SECRET_KEY', 'default-secret')
class ProdConfig(BaseConfig):
DEBUG = False
TESTING = False
JWT_SECRET_KEY = environ.get('SECRET_KEY', None)
configs = {
'development': DevConfig,
'testing': TestConfig,
'production': ProdConfig,
}
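# Usage sketch (an assumption; the application factory lives outside this file):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(configs[environ.get('FLASK_ENV', 'development')])
#
# i.e. the deployment environment name picks one of the config classes above.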
|
from .feed_composites import async_get_composite_feed_data
from .feed_data import async_get_feed_data
from .feed_datum import async_get_feed_datum
|
# author: "Finnian Reilly"
# copyright: "Copyright (c) 2001-2012 Finnian Reilly"
# contact: "finnian at eiffel hyphen loop dot com"
# license: "MIT license (See: en.wikipedia.org/wiki/MIT_License)"
# date: "2 June 2010"
# revision: "0.1"
import platform
from os import path
from distutils.dir_util import *
from eiffel_loop import osprocess
global is_windows
is_windows = platform.system () == 'Windows'
# Directory operations requiring root or administrator permissions
def sudo_mkpath (dir_path):
parent_path = path.dirname (dir_path)
if not path.exists (parent_path):
sudo_mkpath (parent_path)
if is_windows:
osprocess.call (['mkdir', dir_path], shell = True)
else:
osprocess.sudo_call (['mkdir', dir_path])
def sudo_copy_tree (src_path, dest_path):
if is_windows:
osprocess.call (['xcopy', '/S', '/I', src_path, dest_path], shell = True)
else:
osprocess.sudo_call (['cp', '-r', src_path, dest_path])
def sudo_remove_tree (dir_path):
if is_windows:
osprocess.call (['rmdir', '/S', '/Q', dir_path], shell = True)
else:
osprocess.sudo_call (['rm', '-r', dir_path])
def make_link (name, target):
if is_windows:
osprocess.call (['mklink', '/D', name, target], shell = True)
else:
return
def make_archive (archive_path, target):
dir_path = path.dirname (target)
target_dir = path.basename (target)
command = ['tar', '--create', '--gzip', '--file=' + archive_path]
if dir_path:
command.extend (['--directory', dir_path, target_dir])
else:
command.append (target_dir)
if is_windows:
osprocess.call (command, shell = True)
else:
osprocess.call (command)
def extract_archive (archive_path, dir_path, env):
command = ['tar', '--extract', '--gunzip', '--file=' + archive_path, '--directory', dir_path]
if is_windows:
osprocess.call (command, shell = True, env = env)
else:
osprocess.call (command, env = env)
|
# -*- coding: utf-8 -*-
# Scrapy settings for jn_scraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'jn_scraper'
SPIDER_MODULES = ['scraper.spiders']
NEWSPIDER_MODULE = 'scraper.spiders'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
ITEM_PIPELINES = {'scraper.pipelines.ProductItemPipeline': 200}
|
class BoundedQueue:
    # Minimal scaffold (an assumption: the original class definition was not part of
    # this snippet) so that enqueue() below is runnable as written.
    def __init__(self, max_size):
        self.MAX = max_size
        self.queue = [None] * max_size
        self.front = -1
        self.rear = -1

    def enqueue(self, element):
        # reject the element once the fixed-size backing list is full
        if self.rear == self.MAX - 1:
            print("Error - queue full")
        else:
            self.rear += 1
            self.queue[self.rear] = element
            if self.rear == 0:
                self.front = 0
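# Usage sketch (not part of the original snippet; BoundedQueue is the assumed
# scaffold added above purely so the method is runnable):
if __name__ == "__main__":
    q = BoundedQueue(3)
    for item in (1, 2, 3, 4):
        q.enqueue(item)  # the fourth call prints "Error - queue full"
    print(q.queue)  # -> [1, 2, 3]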
|
# -*- coding: utf-8 -*-
import argparse
import sys
from r2env.tools import print_console, ERROR
from r2env.core import R2Env
HELP_MESSAGE = """
Usage: r2env [-flags] [action] [args...]
Flags:
-h, --help - show this help.
-v, --version - display r2env version.
-m, --meson - use meson instead of acr.
-p, --package - install the dist package instead of building
-l, --list - list available and installed packages
Actions:
init - create ~/.r2env directory.
config - display current .r2env settings.
add [pkg] - build and install given package. See -p and -m
use [pkg] - stow a specific version to be the default.
rm [pkg] - remove package from ~/.r2env
path - show path of current r2 in use.
list - list all packages available to r2env.
shell - enter a new shell with PATH env var set.
purge - remove ~/.r2env
Environment
R2ENV_PATH - specify different path other than ~/.r2env
"""
def show_help():
print_console(HELP_MESSAGE)
def show_version():
print_console(R2Env().version())
actions_with_argument = ["add", "install", "rm", "uninstall"]
actions_with_arguments = ["sh", "shell", "use"]
actions = {
"init": R2Env().init,
"v": show_version,
"version": show_version,
"path": R2Env().get_r2_path,
"sh": R2Env().shell,
"shell": R2Env().shell,
"config": R2Env().show_config,
"ls": R2Env().list_packages,
"list": R2Env().list_packages,
"add": R2Env().install,
"install": R2Env().install,
"installed": R2Env().list_installed_packages,
"uninstall": R2Env().uninstall,
"rm": R2Env().uninstall,
"use": R2Env().use,
"purge": R2Env().purge,
"h": show_help,
"help": show_help
}
def run_action(argp):
action = ""
args = []
if len(argp.args) > 0:
action = argp.args[0]
if len(argp.args) > 1:
args = argp.args[1:]
if argp.version:
print_console(R2Env().version())
elif argp.list:
actions["list"]()
elif action == "":
show_help()
elif action not in actions:
print_console("Invalid action", ERROR)
elif action in actions_with_arguments:
actions[action](" ".join(args))
elif action in actions_with_argument:
exit_if_not_argument_is_set(args, action)
actions[action](args[0], use_meson=argp.meson, use_dist=argp.package)
else:
actions[action]()
def exit_if_not_argument_is_set(args, action):
if len(args) < 1:
if action in ["use", "rm", "uninstall"]:
print_console("[x] Package not defined.", ERROR)
R2Env().list_installed_packages()
else:
print_console("[x] Missing package argument.", ERROR)
R2Env().list_available_packages()
sys.exit(-1)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("args", help="run specified action. (Run r2env help for more information)",
action="store", nargs="*", default=[])
#parser.add_argument('args', metavar='args', nargs='+', type=str, help='Specified arguments')
parser.add_argument("-v", "--version", dest="version", help="Show r2env version", action="store_true")
parser.add_argument("-m", "--meson", dest="meson", help="Use meson instead of acr to compile", action="store_true")
parser.add_argument("-p", "--package", dest="package", help="Use binary package for target system if available", action="store_true")
parser.add_argument("-l", "--list", dest="list", help="List available and installed packages", action="store_true")
parser.print_help = show_help
argp = parser.parse_args()
run_action(argp)
if __name__ == "__main__":
main()
|
import datetime as dt
import filecmp
import re
import shutil
import warnings
from copy import deepcopy
from os import listdir
from os.path import basename, dirname, isfile, join
from unittest.mock import patch
import f90nml
import numpy as np
import pandas as pd
import pkg_resources
import pytest
from numpy import testing as npt
from openscm_units import unit_registry
from scmdata import ScmRun
from scmdata.testing import assert_scmdf_almost_equal
import pymagicc.definitions
from pymagicc.config import _is_windows
from pymagicc.errors import InvalidTemporalResError, NoReaderWriterError
from pymagicc.io import (
MAGICCData,
_ConcInReader,
determine_tool,
get_generic_rcp_name,
pull_cfg_from_parameters_out,
pull_cfg_from_parameters_out_file,
read_cfg_file,
read_mag_file_metadata,
to_int,
)
from pymagicc.io.base import _Reader
from pymagicc.io.compact import find_parameter_groups
from pymagicc.io.scen import get_special_scen_code
MAGICC6_DIR = pkg_resources.resource_filename("pymagicc", "MAGICC6/run")
TEST_DATA_DIR = join(dirname(__file__), "test_data")
TEST_OUT_DIR = join(TEST_DATA_DIR, "out_dir")
EXPECTED_FILES_DIR = join(TEST_DATA_DIR, "expected_files")
# Not all files can be read in
TEST_OUT_FILES = listdir(TEST_OUT_DIR)
INVALID_OUT_FILES = [
r"CARBONCYCLE.*OUT",
r".*SUBANN.*\.BINOUT",
r"DAT_VOLCANIC_RF\.BINOUT",
r"PF.*OUT",
r"DATBASKET_.*",
r"PRECIPINPUT.*OUT",
r"TEMP_OCEANLAYERS.*\.BINOUT",
r"INVERSEEMIS\.BINOUT",
r".*INVERSE\_.*EMIS.*OUT",
r"TIMESERIESMIX.*OUT",
r"SUMMARY_INDICATORS.OUT",
]
def run_writing_comparison(res, expected, update=False):
"""Run test that writing file is behaving as expected
Parameters
----------
res : str
File written as part of the test
expected : str
File against which the comparison should be done
update : bool
If True, don't perform the test and instead simply
overwrite the existing expected file with ``res``
Raises
------
AssertionError
If ``update`` is ``False`` and ``res`` and ``expected``
are not identical.
"""
if _is_windows:
pytest.skip("Regression files written on linux")
if update:
shutil.copy(res, expected)
pytest.skip("Updated {}".format(expected))
else:
# try:
assert filecmp.cmp(res, expected, shallow=False)
# except AssertionError:
# import pdb
# pdb.set_trace()
# print("vimdiff {} {}".format(res, expected))
def generic_mdata_tests(mdata, extra_index_cols={"todo": "object"}):
"""Resusable tests to ensure data format"""
assert len(mdata)
assert isinstance(mdata, ScmRun)
index = ["model", "scenario", "region", "variable", "unit", "climate_model"]
if extra_index_cols is not None:
index += list(extra_index_cols.keys())
assert sorted(mdata.meta.columns.tolist()) == sorted(index)
assert mdata["variable"].dtype == "object"
assert mdata["unit"].dtype == "object"
for u in mdata.get_unique_meta("unit"):
if u in ["unknown"]:
continue
# check the unit is recognised by pint
unit_registry(u)
assert mdata["region"].dtype == "object"
assert mdata["scenario"].dtype == "object"
assert mdata["model"].dtype == "object"
assert mdata["climate_model"].dtype == "object"
if extra_index_cols is not None:
for n, t in extra_index_cols.items():
if isinstance(t, str):
assert mdata[n].dtype == t
else:
assert mdata[n].apply(lambda x: isinstance(x, t)).all()
for key in ["units", "unit", "firstdatarow", "dattype"]:
with pytest.raises(KeyError):
mdata.metadata[key]
assert "header" not in mdata.metadata or isinstance(mdata.metadata["header"], str)
def assert_mdata_value(mdata, value, **kwargs):
res = mdata.filter(**kwargs)
assert len(res) == 1
if value < 0.1:
np.testing.assert_allclose(res.timeseries().iloc[0], value, rtol=1e-4)
else:
np.testing.assert_allclose(res.timeseries().iloc[0], value)
def test_cant_find_reader_writer():
test_name = "HISTRCP_CO2I_EMIS.txt"
expected_message = (
r"^"
+ re.escape("Couldn't find appropriate reader for {}.".format(test_name))
+ r"\n"
+ re.escape(
"The file must be one "
"of the following types and the filepath must match its "
"corresponding regular expression:"
)
+ r"(\n.*)*" # dicts aren't ordered in Python3.5
+ re.escape("SCEN: ^.*\\.SCEN$")
+ r"(\n.*)*$"
)
with pytest.raises(NoReaderWriterError, match=expected_message):
determine_tool(join(TEST_DATA_DIR, test_name), "reader")
expected_message = expected_message.replace("reader", "writer")
with pytest.raises(NoReaderWriterError, match=expected_message):
determine_tool(join(TEST_DATA_DIR, test_name), "writer")
def test_not_implemented_writer():
test_name = "DAT_SURFACE_TEMP.BINOUT"
expected_message = re.escape(
"A writer for `^DAT\\_.*\\.BINOUT$` files is not yet implemented"
)
with pytest.raises(NotImplementedError, match=expected_message):
determine_tool(join(TEST_DATA_DIR, test_name), "writer")
def test_get_invalid_tool():
junk_tool = "junk tool"
expected_error_msg = (
r"^\"?"
+ re.escape(
"MAGICCData does not know how to get a {}, "
"valid options are:".format(junk_tool)
)
+ r".*\"?$"
)
with pytest.raises(KeyError, match=expected_error_msg):
determine_tool("EXAMPLE.SCEN", junk_tool)
def test_load_magicc6_emis():
mdata = MAGICCData(join(MAGICC6_DIR, "HISTRCP_CO2I_EMIS.IN"))
assert len(mdata)
generic_mdata_tests(mdata)
assert_mdata_value(
mdata,
1.7682027e000,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World|R5ASIA",
year=2000,
unit="Gt C / yr",
todo="SET",
)
def test_load_magicc6_emis_hyphen_in_units():
mdata = MAGICCData(join(MAGICC6_DIR, "HISTRCP_N2OI_EMIS.IN"))
generic_mdata_tests(mdata)
assert_mdata_value(
mdata,
0.288028519,
variable="Emissions|N2O|MAGICC Fossil and Industrial",
region="World|R5ASIA",
year=2000,
unit="Mt N2ON / yr",
todo="SET",
)
def test_load_magicc5_emis():
mdata = MAGICCData(join(MAGICC6_DIR, "MARLAND_CO2I_EMIS.IN"))
generic_mdata_tests(mdata)
assert_mdata_value(
mdata,
6.20403698,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World|Northern Hemisphere",
year=2000,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.495812385,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World|Southern Hemisphere",
year=2002,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.0,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World|Southern Hemisphere",
year=1751,
unit="Gt C / yr",
todo="SET",
)
def test_load_magicc5_emis_not_renamed_error():
test_path = TEST_DATA_DIR
test_name = "MARLAND_CO2_EMIS_FOSSIL&IND.IN"
expected_error_msg = re.escape(
"Cannot determine variable from filepath: {}".format(join(test_path, test_name))
)
with pytest.raises(ValueError, match=expected_error_msg):
MAGICCData(join(test_path, test_name))
def test_load_magicc6_conc():
mdata = MAGICCData(join(MAGICC6_DIR, "HISTRCP_CO2_CONC.IN"))
assert (mdata["unit"] == "ppm").all()
generic_mdata_tests(mdata)
assert_mdata_value(
mdata,
2.80435733e002,
variable="Atmospheric Concentrations|CO2",
region="World",
year=1048,
unit="ppm",
todo="SET",
)
def test_load_magicc6_conc_old_style_name_umlaut_metadata():
mdata = MAGICCData(join(MAGICC6_DIR, "HISTRCP_HFC245fa_CONC.IN"))
assert (mdata["unit"] == "ppt").all()
assert mdata.metadata["data"] == "Global average mixing ratio"
generic_mdata_tests(mdata)
assert_mdata_value(
mdata,
0.0,
variable="Atmospheric Concentrations|HFC245fa",
region="World",
year=2000,
unit="ppt",
todo="SET",
)
def test_load_magicc6_conc_old_style_name_with_hyphen():
mdata = MAGICCData(join(MAGICC6_DIR, "HISTRCP_HFC43-10_CONC.IN"))
assert (mdata["unit"] == "ppt").all()
generic_mdata_tests(mdata)
assert_mdata_value(
mdata,
0.0,
variable="Atmospheric Concentrations|HFC4310",
region="World",
year=2000,
unit="ppt",
todo="SET",
)
def test_load_magicc7_emis_umlaut_metadata():
mdata = MAGICCData(join(TEST_DATA_DIR, "HISTSSP_CO2I_EMIS.IN"))
generic_mdata_tests(mdata)
assert (
mdata.metadata["contact"]
== "Zebedee Nicholls, Australian-German Climate and Energy College, University of Melbourne, zebedee.nicholls@climate-energy-college.org"
)
assert mdata.metadata["description"] == "Test line by näme with ümlauts ëh ça"
assert mdata.metadata["key"] == "value"
assert mdata.metadata["length"] == "27 hands"
assert (mdata["unit"] == "Gt C / yr").all()
assert_mdata_value(
mdata,
0.6638,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World|R5.2REF",
year=2013,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
1.6911,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World|R5.2ASIA",
year=2000,
unit="Gt C / yr",
todo="SET",
)
def test_load_ot():
mdata = MAGICCData(join(MAGICC6_DIR, "MIXED_NOXI_OT.IN"))
generic_mdata_tests(mdata)
assert mdata.metadata["data"] == "Optical Thickness"
assert (
mdata.metadata["description"]
== "the land/ocean ratio of optical depth of NOXI is scaled with the hemispheric EDGAR NOXI emissions. NOXI opt. depth as available on http://www.giss.nasa.gov/data/simodel/trop.aer/"
)
assert (
mdata.metadata["source"]
== "Mixed: EDGAR: www.mnp.nl; NASA-GISS: http://data.giss.nasa.gov/"
)
assert (
mdata.metadata["compiled by"]
== "Malte Meinshausen, Lauder NZ, NCAR/PIK, malte.meinshausen@gmail.com"
)
assert mdata.metadata["date"] == "18-Jul-2006 11:02:48"
assert mdata.metadata["unit normalisation"] == "Normalized to 1 in year 2000"
assert (mdata["unit"] == "dimensionless").all()
assert (mdata["todo"] == "SET").all()
assert (
mdata["variable"] == "Optical Thickness|NOx|MAGICC Fossil and Industrial"
).all()
assert_mdata_value(
mdata,
0.00668115649,
variable="Optical Thickness|NOx|MAGICC Fossil and Industrial",
region="World|Northern Hemisphere|Ocean",
year=1765,
unit="dimensionless",
todo="SET",
)
assert_mdata_value(
mdata,
0.526135104,
variable="Optical Thickness|NOx|MAGICC Fossil and Industrial",
region="World|Northern Hemisphere|Land",
year=1865,
unit="dimensionless",
todo="SET",
)
assert_mdata_value(
mdata,
0.612718845,
variable="Optical Thickness|NOx|MAGICC Fossil and Industrial",
region="World|Southern Hemisphere|Ocean",
year=1965,
unit="dimensionless",
todo="SET",
)
assert_mdata_value(
mdata,
3.70378,
variable="Optical Thickness|NOx|MAGICC Fossil and Industrial",
region="World|Southern Hemisphere|Land",
year=2000,
unit="dimensionless",
todo="SET",
)
def test_load_rf():
mdata = MAGICCData(join(MAGICC6_DIR, "GISS_BCB_RF.IN"))
generic_mdata_tests(mdata)
assert mdata.metadata["data"] == "Radiative Forcing"
assert (
mdata.metadata["description"]
== "BCB - Radiative Forcing of year 2000 (as provided on http://data.giss.nasa.gov/efficacy/) for four MAGICC boxes; scaled over time with optical thickness of file giss_bcb_ot"
)
assert (
mdata.metadata["source"]
== "Original-GISS-Description: NET RADIA. AT 0 MB (W/M+2) ANN E2BCBx6a-E2AarM20A (yr 1850->2000 dBCB*6 - adj"
)
assert (
mdata.metadata["compiled by"]
== "Malte Meinshausen, Lauder NZ, NCAR/PIK, malte.meinshausen@gmail.com"
)
assert mdata.metadata["date"] == "18-Jul-2006 11:05:18"
assert (mdata["unit"] == "W / m^2").all()
assert (mdata["todo"] == "SET").all()
assert (
mdata["variable"] == "Radiative Forcing|Aerosols|Direct Effect|BC|MAGICC AFOLU"
).all()
assert_mdata_value(
mdata,
0.0,
variable="Radiative Forcing|Aerosols|Direct Effect|BC|MAGICC AFOLU",
region="World|Northern Hemisphere|Ocean",
year=1765,
# unit="W / m^2", # bug, can't use ^ in filter now as regexp means it looks for not, propose removing such behaviour in pyam based on usefulness of units and fact that complicated regexp can be re-enabled with regexp=True
todo="SET",
)
assert_mdata_value(
mdata,
0.268436597,
variable="Radiative Forcing|Aerosols|Direct Effect|BC|MAGICC AFOLU",
region="World|Northern Hemisphere|Land",
year=1865,
# unit="W / m^2",
todo="SET",
)
assert_mdata_value(
mdata,
0.443357552,
variable="Radiative Forcing|Aerosols|Direct Effect|BC|MAGICC AFOLU",
region="World|Southern Hemisphere|Ocean",
year=1965,
# unit="W / m^2",
todo="SET",
)
assert_mdata_value(
mdata,
1.53987244,
variable="Radiative Forcing|Aerosols|Direct Effect|BC|MAGICC AFOLU",
region="World|Southern Hemisphere|Land",
year=2000,
# unit="W / m^2",
todo="SET",
)
def test_load_solar_rf():
mdata = MAGICCData(join(MAGICC6_DIR, "HISTRCP6SCP6to45_SOLAR_RF.IN"))
generic_mdata_tests(mdata)
assert mdata.metadata["data"] == "Radiative Forcing, kept constant after 2250"
assert (
mdata.metadata["description"]
== "Solar irradiance by Lean et al. as recommended for CMIP5, as documented here: http://www.geo.fu-berlin.de/en/met/ag/strat/forschung/SOLARIS/Input_data/CMIP5_solar_irradiance.html - converted into radiative forcing by dividing by 4 (geometrical) and multiplying by 0.7 (albedo) effect. Furthermore, the data is normalized to have an average zero around 1750 (22 years around 1750)."
)
# second definition of metadata wins out
assert (
mdata.metadata["source"]
== "RCP data as presented on http://www.iiasa.ac.at/web-apps/tnt/RcpDb, August 2009"
)
assert (
mdata.metadata["compiled by"]
== "Malte Meinshausen, malte.meinshausen@pik-potsdam.de, www.primap.org"
)
assert mdata.metadata["date"] == "24-Jul-2009 17:05:30"
assert (mdata["unit"] == "W / m^2").all()
assert (mdata["todo"] == "SET").all()
assert (mdata["variable"] == "Radiative Forcing|Solar").all()
assert (mdata["region"] == "World").all()
assert_mdata_value(
mdata,
0.0149792391,
variable="Radiative Forcing|Solar",
region="World",
year=1610,
# unit="W / m^2",
todo="SET",
)
assert_mdata_value(
mdata,
-0.00160201087,
variable="Radiative Forcing|Solar",
year=1865,
# unit="W / m^2",
)
assert_mdata_value(
mdata,
0.0652917391,
variable="Radiative Forcing|Solar",
year=1965,
# unit="W / m^2",
)
assert_mdata_value(
mdata,
0.0446329891,
variable="Radiative Forcing|Solar",
year=2183,
# unit="W / m^2",
)
assert_mdata_value(
mdata,
0.121325148,
variable="Radiative Forcing|Solar",
year=2600,
# unit="W / m^2",
)
def test_load_volcanic_rf():
# test that starting with an annual file doesn't make things blow up
mdata = MAGICCData(join(MAGICC6_DIR, "HISTRCP_CO2_CONC.IN"))
mdata = mdata.append(join(MAGICC6_DIR, "HIST_VOLCANIC_RF.MON"))
generic_mdata_tests(mdata)
assert mdata.metadata["data"] == "Radiative Forcing"
assert (
mdata.metadata["description"]
== "Monthly Volcanic radiative forcing - relative / unscaled - as in NASA GISS model, using a optical thickness to radiative forcing conversion of -23.5"
)
# second definition of metadata wins out
assert mdata.metadata["source"] == "NASA-GISS: http://data.giss.nasa.gov/"
assert (
mdata.metadata["compiled by"]
== "Tom Wigley and (a little bit by) Malte Meinshausen"
)
assert (
mdata.metadata["date"] == "10-Oct-2008 21:09:25 & updated 22-Sept-2009 18:46:24"
)
assert (mdata.filter(variable="*Forcing*")["unit"] == "W / m^2").all()
assert (mdata["todo"] == "SET").all()
assert (
mdata.filter(variable="*Forcing*")["variable"] == "Radiative Forcing|Volcanic"
).all()
assert_mdata_value(
mdata,
0.0,
variable="Radiative Forcing|Volcanic",
region="World|Northern Hemisphere|Land",
year=1000,
month=1,
# unit="W / m^2", # TODO: fix pyam filtering with / and ^
todo="SET",
)
assert_mdata_value(
mdata,
-0.0187500000,
variable="Radiative Forcing|Volcanic",
region="World|Northern Hemisphere|Land",
year=1002,
month=7,
# unit="W / m^2",
todo="SET",
)
assert_mdata_value(
mdata,
0.0,
variable="Radiative Forcing|Volcanic",
region="World|Northern Hemisphere|Ocean",
year=1013,
month=3,
# unit="W / m^2",
todo="SET",
)
assert_mdata_value(
mdata,
-0.222916667,
variable="Radiative Forcing|Volcanic",
region="World|Southern Hemisphere|Ocean",
year=1119,
month=4,
# unit="W / m^2",
todo="SET",
)
assert_mdata_value(
mdata,
0.0,
variable="Radiative Forcing|Volcanic",
region="World|Southern Hemisphere|Land",
year=2006,
month=12,
# unit="W / m^2",
todo="SET",
)
@pytest.mark.parametrize(
"start_unit,expected_unit",
(
("Wpermsuper2", "W / m^2"),
("Wperm**2", "W / m^2"),
("W per m**2", "W / m^2"),
("W per m ** 2", "W / m^2"),
("Wperm ^ 2", "W / m^2"),
("kg m^-2 s^-2", "kg m^-2 s^-2"),
("kg per m^2 s^-2", "kg/m^2s^-2"),
("kgperm^2 s^-2", "kg/m^2s^-2"),
("kgsuper1perm^2s^-2", "kg^1/m^2s^-2"),
("Gt C / yr", "Gt C / yr"),
),
)
@pytest.mark.parametrize(
"start_file",
(
join(TEST_DATA_DIR, "expected_files", "EXPECTED_HISTRCP85_SOLAR_RF.IN"),
join(TEST_DATA_DIR, "MAG_FORMAT_SAMPLE.MAG"),
),
)
def test_fortran_unit_handling(temp_dir, start_unit, expected_unit, start_file):
start = MAGICCData(start_file)
start["variable"] = "Radiative Forcing|Solar"
start["unit"] = start_unit
test_file = join(temp_dir, basename(start_file))
start.write(test_file, magicc_version=7)
res = MAGICCData(test_file)
res_unit = res.get_unique_meta("unit", no_duplicates=True)
assert res_unit.replace(" ", "") == expected_unit.replace(" ", "")
npt.assert_allclose(
unit_registry(res_unit).to(expected_unit).magnitude, 1, rtol=1e-10
)
def test_load_scen():
mdata = MAGICCData(join(MAGICC6_DIR, "RCP26.SCEN"))
generic_mdata_tests(mdata)
assert (mdata["model"] == "unspecified").all()
assert (mdata["scenario"] == "RCP3PD").all()
assert (mdata["climate_model"] == "unspecified").all()
assert (
mdata.metadata["description"]
== "HARMONISED, EXTENDED FINAL RCP3-PD (Peak&Decline) NOV26; RCP3PD-Contact: IMAGE group, Detlef van Vuuren (Detlef.vanVuuren@pbl.nl)"
)
assert (
mdata.metadata["notes"]
== "DATE: 26/11/2009 11:29:06; MAGICC-VERSION: 6.3.09, 25 November 2009"
)
assert "Final RCP3PD with constant emissions" in mdata.metadata["header"]
assert_mdata_value(
mdata,
6.7350,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World",
year=2000,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
7.5487,
variable="Emissions|N2O",
region="World",
year=2002,
unit="Mt N2ON / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.6470,
variable="Emissions|HFC4310",
region="World",
year=2001,
unit="kt HFC4310 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
11.9769,
variable="Emissions|SOx",
region="World|R5OECD",
year=2005,
unit="Mt S / yr",
todo="SET",
)
assert_mdata_value(
mdata,
18.2123,
variable="Emissions|NMVOC",
region="World|R5OECD",
year=2050,
unit="Mt NMVOC / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0,
variable="Emissions|HFC23",
region="World|R5REF",
year=2100,
unit="kt HFC23 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
33.3635,
variable="Emissions|HFC143a",
region="World|R5ASIA",
year=2040,
unit="kt HFC143a / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.8246,
variable="Emissions|SF6",
region="World|R5ASIA",
year=2040,
unit="kt SF6 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
-0.0125,
variable="Emissions|CO2|MAGICC AFOLU",
region="World|R5MAF",
year=2050,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
37.6218,
variable="Emissions|CH4",
region="World|R5MAF",
year=2070,
unit="Mt CH4 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
1.8693,
variable="Emissions|NOx",
region="World|R5LAM",
year=2080,
unit="Mt N / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.4254,
variable="Emissions|BC",
region="World|R5LAM",
year=2090,
unit="Mt BC / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0,
variable="Emissions|NH3",
region="World|Bunkers",
year=2000,
unit="Mt N / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.0,
variable="Emissions|SF6",
region="World|Bunkers",
year=2002,
unit="kt SF6 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
5.2133,
variable="Emissions|HFC125",
region="World|R5REF",
year=2125,
unit="kt HFC125 / yr",
todo="SET",
)
def test_load_remind_scen():
mdata = MAGICCData(join(TEST_DATA_DIR, "REMIND_testOneRegi.SCEN"))
generic_mdata_tests(mdata)
assert_mdata_value(
mdata,
6.735,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World",
year=2000,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
1.4845,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World",
year=2500,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
5.5382,
variable="Emissions|SF6",
region="World",
year=2500,
unit="kt SF6 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
9.4971,
variable="Emissions|SF6",
region="World",
year=2100,
unit="kt SF6 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.000,
variable="Emissions|SF6",
region="World",
year=2150,
unit="kt SF6 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.0138,
variable="Emissions|OC",
region="World|Bunkers",
year=2150,
unit="Mt OC / yr",
todo="SET",
)
def test_load_scen_specify_metadata():
tmodel = "MESSAGE"
tscenario = "RCP45"
tclimate_model = "MAGICC6"
mdata = MAGICCData(
join(MAGICC6_DIR, "RCP26.SCEN"),
columns={
"model": [tmodel],
"scenario": [tscenario],
"climate_model": [tclimate_model],
},
)
generic_mdata_tests(mdata)
assert (mdata["model"] == tmodel).all()
assert (mdata["scenario"] == tscenario).all()
assert (mdata["climate_model"] == tclimate_model).all()
def test_load_scen_metadata_and_year_first_column():
mdata = MAGICCData(join(TEST_DATA_DIR, "RCP26_WORLD_ONLY_YEAR_FIRST_COLUMN.SCEN"))
generic_mdata_tests(mdata)
assert mdata.metadata["description"] == "Generic text: something here"
assert mdata.metadata["notes"] == "Other text"
assert "20" not in mdata.metadata["header"]
assert (
"Final RCP3PD with constant emissions after 2100 using the default RCPtool MAGICC6.3 settings. Compiled by: malte.meinshausen@pik-potsdam.de"
in mdata.metadata["header"]
)
assert all(mdata["scenario"] == "JUNK")
assert_mdata_value(
mdata,
6.7350,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World",
year=2000,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
7.5487,
variable="Emissions|N2O",
region="World",
year=2002,
unit="Mt N2ON / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.6470,
variable="Emissions|HFC4310",
region="World",
year=2001,
unit="kt HFC4310 / yr",
todo="SET",
)
@patch("pymagicc.io._ScenReader._read_data_header_line")
def test_load_scen_last_resort_message(mock_scen_header_line_reader):
mock_scen_header_line_reader.side_effect = AssertionError
error_msg = re.escape(
"This is unexpected, please raise an issue on "
"https://github.com/openscm/pymagicc/issues"
)
with pytest.raises(Exception, match=error_msg):
MAGICCData(join(MAGICC6_DIR, "RCP26.SCEN"))
def test_load_scen_sres():
mdata = MAGICCData(join(MAGICC6_DIR, "SRESA1B.SCEN"))
generic_mdata_tests(mdata)
assert "Antero Hot Springs" in mdata.metadata["header"]
assert_mdata_value(
mdata,
6.8963,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World",
year=2000,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
6.6751,
variable="Emissions|N2O",
region="World",
year=1990,
unit="Mt N2ON / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0,
variable="Emissions|HFC4310",
region="World",
year=2000,
unit="kt HFC4310 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
9.8762,
variable="Emissions|SOx",
region="World|OECD90",
year=2010,
unit="Mt S / yr",
todo="SET",
)
assert_mdata_value(
mdata,
28.1940,
variable="Emissions|NMVOC",
region="World|OECD90",
year=2050,
unit="Mt NMVOC / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.0624,
variable="Emissions|HFC23",
region="World|REF",
year=2100,
unit="kt HFC23 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
5.4067,
variable="Emissions|HFC125",
region="World|REF",
year=2100,
unit="kt HFC125 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
15.4296,
variable="Emissions|HFC143a",
region="World|ASIA",
year=2040,
unit="kt HFC143a / yr",
todo="SET",
)
assert_mdata_value(
mdata,
6.4001,
variable="Emissions|SF6",
region="World|ASIA",
year=2040,
unit="kt SF6 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.2613,
variable="Emissions|CO2|MAGICC AFOLU",
region="World|ALM",
year=2050,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
130.1256,
variable="Emissions|CH4",
region="World|ALM",
year=2070,
unit="Mt CH4 / yr",
todo="SET",
)
def test_load_scen7():
mdata = MAGICCData(join(TEST_DATA_DIR, "TESTSCEN7.SCEN7"))
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "13-Oct-2017 16:45:35"
assert mdata.metadata["description"] == "TEST SCEN7 file"
assert "NOTES" in mdata.metadata["header"]
assert "~~~~~" in mdata.metadata["header"]
assert "Some notes" in mdata.metadata["header"]
assert_mdata_value(
mdata,
6.7350,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World",
year=2000,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
1.1488,
variable="Emissions|CO2|MAGICC AFOLU",
region="World",
year=2000,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
7.5487,
variable="Emissions|N2O|MAGICC Fossil and Industrial",
region="World",
year=2002,
unit="Mt N2ON / yr",
todo="SET",
)
assert_mdata_value(
mdata,
10.4328,
variable="Emissions|HFC23",
region="World",
year=2001,
unit="kt HFC23 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
11.9769,
variable="Emissions|SOx",
region="World|R5.2OECD",
year=2005,
unit="Mt S / yr",
todo="SET",
)
assert_mdata_value(
mdata,
18.2123,
variable="Emissions|NMVOC",
region="World|R5.2OECD",
year=2050,
unit="Mt NMVOC / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0,
variable="Emissions|HFC23",
region="World|R5.2REF",
year=2100,
unit="kt HFC23 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
5.2133,
variable="Emissions|CH2Cl2",
region="World|R5.2REF",
year=2125,
unit="kt CH2Cl2 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
33.3635,
variable="Emissions|HFC143a",
region="World|R5.2ASIA",
year=2040,
unit="kt HFC143a / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.8246,
variable="Emissions|SO2F2",
region="World|R5.2ASIA",
year=2040,
unit="kt SO2F2 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
-0.0125,
variable="Emissions|CO2|MAGICC AFOLU",
region="World|R5.2MAF",
year=2050,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
37.6218,
variable="Emissions|CH4",
region="World|R5.2MAF",
year=2070,
unit="Mt CH4 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
1.8693,
variable="Emissions|NOx",
region="World|R5.2LAM",
year=2080,
unit="Mt N / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.4254,
variable="Emissions|BC|MAGICC AFOLU",
region="World|R5.2LAM",
year=2090,
unit="Mt BC / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0,
variable="Emissions|NH3",
region="World|Bunkers",
year=2000,
unit="Mt N / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0,
variable="Emissions|SO2F2",
region="World|Bunkers",
year=2002,
unit="kt SO2F2 / yr",
todo="SET",
)
def test_load_rewritten_scen7(temp_dir):
write_file = join(temp_dir, "REWRITTEN.SCEN7")
cols = {"model": ["IMAGE"], "scenario": ["RCP26"], "climate_model": ["MAGICC6"]}
writer = MAGICCData(join(MAGICC6_DIR, "RCP26.SCEN"), columns=cols)
warn_msg = (
"MAGICC6 RCP region naming (R5*) is not compatible with MAGICC7, "
"automatically renaming to MAGICC7 compatible regions (R5.2*)"
)
with warnings.catch_warnings(record=True) as warn_autorename_region:
writer.write(write_file, magicc_version=7)
assert len(warn_autorename_region) == 1
assert warn_msg == str(warn_autorename_region[0].message)
mdata = MAGICCData(write_file, columns=cols)
generic_mdata_tests(mdata)
assert sorted(mdata["region"].unique().tolist()) == sorted(
["World"]
+ [
"World|{}".format(v)
for v in [
"R5.2ASIA",
"R5.2MAF",
"R5.2REF",
"R5.2LAM",
"R5.2OECD",
"Bunkers",
]
]
)
assert_mdata_value(
mdata,
6.7350,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World",
year=2000,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
1.1488,
variable="Emissions|CO2|MAGICC AFOLU",
region="World",
year=2000,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
7.5487,
variable="Emissions|N2O",
region="World",
year=2002,
unit="Mt N2ON / yr",
todo="SET",
)
assert_mdata_value(
mdata,
10.4328,
variable="Emissions|HFC23",
region="World",
year=2001,
unit="kt HFC23 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
11.9769,
variable="Emissions|SOx",
region="World|R5.2OECD",
year=2005,
unit="Mt S / yr",
todo="SET",
)
assert_mdata_value(
mdata,
18.2123,
variable="Emissions|NMVOC",
region="World|R5.2OECD",
year=2050,
unit="Mt NMVOC / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0,
variable="Emissions|HFC23",
region="World|R5.2REF",
year=2100,
unit="kt HFC23 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
33.3635,
variable="Emissions|HFC143a",
region="World|R5.2ASIA",
year=2040,
unit="kt HFC143a / yr",
todo="SET",
)
assert_mdata_value(
mdata,
-0.0125,
variable="Emissions|CO2|MAGICC AFOLU",
region="World|R5.2MAF",
year=2050,
unit="Gt C / yr",
todo="SET",
)
assert_mdata_value(
mdata,
37.6218,
variable="Emissions|CH4",
region="World|R5.2MAF",
year=2070,
unit="Mt CH4 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
1.8693,
variable="Emissions|NOx",
region="World|R5.2LAM",
year=2080,
unit="Mt N / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.4254,
variable="Emissions|BC",
region="World|R5.2LAM",
year=2090,
unit="Mt BC / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0,
variable="Emissions|NH3",
region="World|Bunkers",
year=2000,
unit="Mt N / yr",
todo="SET",
)
def test_load_rewrite_scen7_scen_loop(temp_dir):
write_file_rewritten = join(temp_dir, "REWRITTEN.SCEN7")
cols = {"model": ["IMAGE"], "scenario": ["RCP26"], "climate_model": ["MAGICC6"]}
writer = MAGICCData(join(MAGICC6_DIR, "RCP26.SCEN"), columns=cols)
with warnings.catch_warnings(record=True): # warning tested elsewhere
writer.write(write_file_rewritten, magicc_version=7)
rewritten = MAGICCData(write_file_rewritten, columns=cols)
write_file_loop = join(temp_dir, "LOOP.SCEN")
warn_msg = (
"MAGICC7 RCP region naming (R5.2*) is not compatible with MAGICC6, "
"automatically renaming to MAGICC6 compatible regions (R5*)"
)
with warnings.catch_warnings(record=True) as warn_autorename_region:
rewritten.write(write_file_loop, magicc_version=6)
assert len(warn_autorename_region) == 1
assert warn_msg == str(warn_autorename_region[0].message)
res = MAGICCData(write_file_loop)
assert sorted(res["region"].unique().tolist()) == sorted(
["World"]
+ [
"World|{}".format(v)
for v in ["R5ASIA", "R5MAF", "R5REF", "R5LAM", "R5OECD", "Bunkers",]
]
)
@pytest.mark.parametrize(
"variable_filename,variable,exp_error",
(
("CO2I", "Emissions|CO2|MAGICC Fossil and Industrial", False),
("CO2", "Emissions|CO2|MAGICC Fossil and Industrial", True),
),
)
def test_write_scen7_single_variable(temp_dir, variable_filename, variable, exp_error):
tfilename = "SINGLE_VARIABLE_TEST_{}.SCEN7".format(variable_filename)
start = MAGICCData(
data=range(10),
index=range(2020, 2030),
columns={
"variable": variable,
"unit": "GtC / yr",
"region": "World",
"scenario": "Test",
"model": "test",
"todo": "SET",
},
)
start.metadata = {"header": "test_write_scen7_single_variable"}
tfile = join(temp_dir, tfilename)
if exp_error:
var_filename = pymagicc.definitions.convert_magicc7_to_openscm_variables(
"{}_EMIS".format(variable_filename)
)
error_msg = re.escape(
"Your filename variable, {}, does not match the data "
"variable, {}".format(var_filename, variable)
)
with pytest.raises(ValueError, match=error_msg):
start.write(tfile, magicc_version=7)
else:
start.write(tfile, magicc_version=7)
res = MAGICCData(tfile)
assert res.get_unique_meta("variable", no_duplicates=True) == variable
npt.assert_allclose(res.values.squeeze(), range(10))
def test_load_scen7_mhalo():
mdata = MAGICCData(join(TEST_DATA_DIR, "TEST_MHALO.SCEN7"))
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "22-Dec-2017 18:07:18"
assert mdata.metadata["source"] == "Raw sauce."
assert (
mdata.metadata["description"]
== "This scenario file was compiled to run MAGICC."
)
assert "NOTES" in mdata.metadata["header"]
assert "~~~~~" in mdata.metadata["header"]
assert "HCFC22" in mdata.metadata["header"]
assert_mdata_value(
mdata,
0.343277,
variable="Emissions|CFC11",
region="World",
year=2015,
unit="kt CFC11 / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.925486,
variable="Emissions|CH3Br",
region="World",
year=2065,
unit="kt CH3Br / yr",
todo="SET",
)
assert_mdata_value(
mdata,
0.047699,
variable="Emissions|Halon1301",
region="World",
year=2100,
unit="kt Halon1301 / yr",
todo="SET",
)
def test_load_prn():
mdata = MAGICCData(join(MAGICC6_DIR, "RCPODS_WMO2006_Emissions_A1.prn"))
generic_mdata_tests(mdata)
# top line should be ignored
assert "16 1850 2500" not in mdata.metadata["header"]
assert mdata.metadata["date"] == "4th September 2009"
assert (
mdata.metadata["description"]
== "1951-2100 Baseline emission file generated by John Daniel and Guus Velders for WMO2006, Chapter 8. (2/3/06); earlier emisisons for CFC-11, CFC-12, CFC-113, CFC-114, CCl4 extended by John Daniel (pers. Communication, 25th"
)
assert (
"ons have been adapted to match mixing ratio observations by Butler et al. 1999 with emissions starting in 1850. HCFC-142b from 2005 to 2007 adapted to Montzka et al. 2009 with emission growth/decline rates kept the same after 2007."
in mdata.metadata["header"]
)
assert (mdata["region"] == "World").all()
assert (mdata["todo"] == "SET").all()
assert not (mdata["unit"] == "t / yr").all()
assert_mdata_value(
mdata, 0, variable="Emissions|CFC11", year=1850, unit="t CFC11 / yr"
)
assert_mdata_value(
mdata, 444, variable="Emissions|CFC115", year=1965, unit="t CFC115 / yr"
)
assert_mdata_value(
mdata, 10743, variable="Emissions|Halon1211", year=1996, unit="t Halon1211 / yr"
)
assert_mdata_value(
mdata, 1062, variable="Emissions|Halon1301", year=2017, unit="t Halon1301 / yr"
)
assert_mdata_value(
mdata, 3511082, variable="Emissions|CH3Cl", year=2500, unit="t CH3Cl / yr"
)
def test_load_prn_no_units():
mdata = MAGICCData(join(MAGICC6_DIR, "WMO2006_ODS_A1Baseline.prn"))
generic_mdata_tests(mdata)
# top line should be ignored
assert "6 1950 2100" not in mdata.metadata["header"]
assert (
"6/19/2006A1: Baseline emission file generated by John Daniel and Guus Velders"
in mdata.metadata["header"]
)
assert (mdata["region"] == "World").all()
assert (mdata["todo"] == "SET").all()
assert not (mdata["unit"] == "t / yr").all()
assert_mdata_value(
mdata, 139965, variable="Emissions|CFC12", year=1950, unit="t CFC12 / yr"
)
assert_mdata_value(
mdata, 3511082, variable="Emissions|CH3Cl", year=2100, unit="t CH3Cl / yr"
)
def test_load_prn_mixing_ratios_years_label():
mdata = MAGICCData(join(MAGICC6_DIR, "RCPODS_WMO2006_MixingRatios_A1.prn"))
generic_mdata_tests(mdata)
# top line should be ignored
assert "17 1850 2100" not in mdata.metadata["header"]
assert mdata.metadata["data"] == "Global average mixing ratios"
assert (
mdata.metadata["description"]
== "1951-2100 Baseline mixing ratio file generated by John Daniel and Guus Velders for WMO2006, Chapter 8. (2/3/06); CH3CL updated to reflect MAGICC6 timeseries after 1955 and lower 2000 concentrations closer to 535ppt in line with"
)
assert (mdata["region"] == "World").all()
assert (mdata["todo"] == "SET").all()
assert (mdata["unit"] == "ppt").all()
assert_mdata_value(mdata, 0, variable="Atmospheric Concentrations|CFC12", year=1850)
assert_mdata_value(
mdata, 5.058, variable="Atmospheric Concentrations|CFC114", year=1965
)
assert_mdata_value(
mdata, 13.81, variable="Atmospheric Concentrations|HCFC141b", year=2059
)
assert_mdata_value(
mdata, 0.007, variable="Atmospheric Concentrations|Halon2402", year=2091
)
assert_mdata_value(
mdata, 538, variable="Atmospheric Concentrations|CH3Cl", year=2100
)
def test_load_rcp_historical_dat_emissions():
test_file = "20THCENTURY_EMISSIONS.DAT"
mdata = MAGICCData(join(TEST_DATA_DIR, test_file))
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "26/11/2009 11:29:06"
assert (mdata["variable"].str.startswith("Emissions|")).all()
assert (mdata["region"] == "World").all()
assert (mdata["todo"] == "SET").all()
assert (mdata["climate_model"] == "MAGICC6.3.09, 25 November 2009").all()
assert (mdata["scenario"] == "20THCENTURY, FINAL RELEASE, 26 Nov. 2009").all()
assert_mdata_value(
mdata,
0.003,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World",
year=1766,
unit="Gt C / yr",
)
assert_mdata_value(
mdata,
2.4364481,
variable="Emissions|CH4",
region="World",
year=1767,
unit="Mt CH4 / yr",
)
assert_mdata_value(
mdata,
3511.0820,
variable="Emissions|CH3Cl",
region="World",
year=2005,
unit="kt CH3Cl / yr",
)
def test_load_rcp_historical_dat_concentrations():
test_file = "20THCENTURY_MIDYEAR_CONCENTRATIONS.DAT"
mdata = MAGICCData(join(TEST_DATA_DIR, test_file))
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "26/11/2009 11:29:06"
assert (mdata["variable"].str.startswith("Atmospheric Concentrations|")).all()
assert (mdata["region"] == "World").all()
assert (mdata["todo"] == "SET").all()
assert (mdata["climate_model"] == "MAGICC6.3.09, 25 November 2009").all()
assert (mdata["scenario"] == "PRE-2005 DATA, FINAL RELEASE, 26 Nov. 2009").all()
assert_mdata_value(
mdata,
277.8388,
variable="Atmospheric Concentrations|CO2 Equivalent",
region="World",
year=1766,
unit="ppm",
)
assert_mdata_value(
mdata,
278.68732,
variable="Atmospheric Concentrations|CO2 Equivalent|Kyoto Gases",
region="World",
year=1767,
unit="ppm",
)
assert_mdata_value(
mdata,
126.7694,
variable="Atmospheric Concentrations|HFC134a Equivalent|F-Gases",
region="World",
year=2005,
unit="ppt",
)
assert_mdata_value(
mdata,
1003.5801,
variable="Atmospheric Concentrations|CFC12 Equivalent|Montreal Protocol Halogen Gases",
region="World",
year=2005,
unit="ppt",
)
assert_mdata_value(
mdata,
538,
variable="Atmospheric Concentrations|CH3Cl",
region="World",
year=2005,
unit="ppt",
)
def test_load_rcp_historical_dat_forcings():
test_file = "20THCENTURY_MIDYEAR_RADFORCING.DAT"
mdata = MAGICCData(join(TEST_DATA_DIR, test_file))
generic_mdata_tests(mdata)
assert (
mdata.metadata["date"]
== "26/11/2009 11:29:06 (updated description, 30 May 2010)."
)
assert (mdata["variable"].str.startswith("Radiative Forcing")).all()
assert (mdata["region"] == "World").all()
assert (mdata["todo"] == "SET").all()
assert (mdata["unit"] == "W / m^2").all()
assert (mdata["climate_model"] == "MAGICC6.3.09, 25 November 2009").all()
assert (
mdata["scenario"] == "20THCENTURY/PRE2005 RUN, FINAL RELEASE, 26 Nov. 2009"
).all()
assert_mdata_value(
mdata,
0.12602655,
variable="Radiative Forcing",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0.0070393750,
variable="Radiative Forcing|Solar",
region="World",
year=1767,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0.10018846,
variable="Radiative Forcing|Black Carbon on Snow",
region="World",
year=2005,
# unit="W / m^2"
)
def test_load_rcp_projections_dat_emissions():
test_file = "RCP3PD_EMISSIONS.DAT"
mdata = MAGICCData(join(TEST_DATA_DIR, test_file))
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "26/11/2009 11:29:06"
assert (mdata["variable"].str.startswith("Emissions|")).all()
assert (mdata["region"] == "World").all()
assert (mdata["todo"] == "SET").all()
assert (mdata["climate_model"] == "MAGICC6.3.09, 25 November 2009").all()
assert (
mdata["scenario"] == "RCP3PD (RCP3-Peak&Decline), FINAL RELEASE, 26 Nov. 2009"
).all()
assert_mdata_value(
mdata,
-0.9308,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World",
year=2100,
unit="Gt C / yr",
)
assert_mdata_value(
mdata,
2.4364481,
variable="Emissions|CH4",
region="World",
year=1767,
unit="Mt CH4 / yr",
)
assert_mdata_value(
mdata,
3511.0820,
variable="Emissions|CH3Cl",
region="World",
year=2500,
unit="kt CH3Cl / yr",
)
def test_load_rcp_projections_dat_concentrations():
test_file = "RCP3PD_MIDYEAR_CONCENTRATIONS.DAT"
mdata = MAGICCData(join(TEST_DATA_DIR, test_file))
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "26/11/2009 11:29:06"
assert (mdata["variable"].str.startswith("Atmospheric Concentrations|")).all()
assert (mdata["region"] == "World").all()
assert (mdata["todo"] == "SET").all()
assert (mdata["climate_model"] == "MAGICC6.3.09, 25 November 2009").all()
assert (
mdata["scenario"] == "RCP3PD (RCP3-Peak&Decline), FINAL RELEASE, 26 Nov. 2009"
).all()
assert_mdata_value(
mdata,
277.8388,
variable="Atmospheric Concentrations|CO2 Equivalent",
region="World",
year=1766,
unit="ppm",
)
assert_mdata_value(
mdata,
475.19275,
variable="Atmospheric Concentrations|CO2 Equivalent|Kyoto Gases",
region="World",
year=2100,
unit="ppm",
)
assert_mdata_value(
mdata,
900.02269,
variable="Atmospheric Concentrations|HFC134a Equivalent|F-Gases",
region="World",
year=2500,
unit="ppt",
)
assert_mdata_value(
mdata,
10.883049,
variable="Atmospheric Concentrations|CFC12 Equivalent|Montreal Protocol Halogen Gases",
region="World",
year=2500,
unit="ppt",
)
assert_mdata_value(
mdata,
538.02891,
variable="Atmospheric Concentrations|CH3Cl",
region="World",
year=2500,
unit="ppt",
)
def test_load_rcp_projections_dat_forcings():
test_file = "RCP3PD_MIDYEAR_RADFORCING.DAT"
mdata = MAGICCData(join(TEST_DATA_DIR, test_file))
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "26/11/2009 11:29:06 (updated description)"
assert (mdata["variable"].str.startswith("Radiative Forcing")).all()
assert (mdata["region"] == "World").all()
assert (mdata["todo"] == "SET").all()
assert (mdata["unit"] == "W / m^2").all()
assert (mdata["climate_model"] == "MAGICC6.3.09, 25 November 2009").all()
assert (mdata["scenario"] == "RCP3PD, FINAL RELEASE, 26. Nov 2009").all()
assert_mdata_value(
mdata,
0.12602655,
variable="Radiative Forcing",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0.11622211,
variable="Radiative Forcing|Volcanic",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0.016318812,
variable="Radiative Forcing|Anthropogenic",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0.015363514,
variable="Radiative Forcing|Greenhouse Gases",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0.015363514,
variable="Radiative Forcing|Greenhouse Gases|Kyoto Gases",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0.015363514,
variable="Radiative Forcing|CO2, CH4 and N2O",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0,
variable="Radiative Forcing|F-Gases",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0,
variable="Radiative Forcing|Montreal Protocol Halogen Gases",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0.000017767194,
variable="Radiative Forcing|Aerosols|Direct Effect",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0.00025010344,
variable="Radiative Forcing|Aerosols|Direct Effect|MAGICC AFOLU",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
-0.00019073512,
variable="Radiative Forcing|Aerosols|Direct Effect|Mineral Dust",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
-0.00080145063,
variable="Radiative Forcing|Aerosols|Indirect Effect",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0,
variable="Radiative Forcing|Stratospheric Ozone",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0.0014060381,
variable="Radiative Forcing|Tropospheric Ozone",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0.00060670657,
variable="Radiative Forcing|CH4 Oxidation Stratospheric H2O",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
-0.00038147024,
variable="Radiative Forcing|Land-use Change",
region="World",
year=1766,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0.19056187,
variable="Radiative Forcing|Solar",
region="World",
year=2100,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0.038412234,
variable="Radiative Forcing|Black Carbon on Snow",
region="World",
year=2500,
# unit="W / m^2"
)
def test_load_sample_dat_emissions():
test_file = "SAMPLE_EMISSIONS.DAT"
mdata = MAGICCData(join(TEST_DATA_DIR, test_file))
generic_mdata_tests(mdata)
assert mdata.metadata["content"] == "Global annual emissions"
assert mdata.metadata["database"] == "database info"
assert mdata.metadata["note"] == ["notes", "second line of notes"]
assert (mdata["variable"].str.startswith("Emissions|")).all()
assert (mdata["region"] == "World").all()
assert (mdata["todo"] == "SET").all()
assert (mdata["scenario"] == "SCEN").all()
assert (mdata["climate_model"] == "MAGICCmagicc version").all()
assert_mdata_value(
mdata,
-0.9308,
variable="Emissions|CO2|MAGICC Fossil and Industrial",
region="World",
year=2100,
unit="Gt C / yr",
)
def test_load_sample_dat_concentrations():
test_file = "SAMPLE_MIDYEAR_CONCENTRATIONS.DAT"
mdata = MAGICCData(join(TEST_DATA_DIR, test_file))
generic_mdata_tests(mdata)
assert mdata.metadata["content"] == "mid-year concentrations"
assert mdata.metadata["file produced by"] == "file production information"
assert mdata.metadata["note"] == ["one line of notes"]
assert (mdata["variable"].str.startswith("Atmospheric Concentrations|")).all()
assert (mdata["region"] == "World").all()
assert (mdata["todo"] == "SET").all()
assert (mdata["scenario"] == "SCEN2").all()
assert (mdata["climate_model"] == "MAGICCmagicc version").all()
assert_mdata_value(
mdata,
277.01467,
variable="Atmospheric Concentrations|CO2 Equivalent",
region="World",
year=1765,
unit="ppm",
)
def test_load_sample_dat_radiative_forcings():
test_file = "SAMPLE_MIDYEAR_RADFORCING.DAT"
mdata = MAGICCData(join(TEST_DATA_DIR, test_file))
generic_mdata_tests(mdata)
assert mdata.metadata["content"] == "annual average, global mean radiative forcing"
assert mdata.metadata["further info"] == "further info"
assert mdata.metadata["documentation"] == "doc info"
assert mdata.metadata["note"] == ["three lines", "of", "notes"]
assert (mdata["variable"].str.startswith("Radiative Forcing")).all()
assert (mdata["region"] == "World").all()
assert (mdata["todo"] == "SET").all()
assert (mdata["scenario"] == "SCEN3").all()
assert (mdata["unit"] == "W / m^2").all()
assert_mdata_value(
mdata, 0.0, variable="Radiative Forcing", region="World", year=1765,
)
assert_mdata_value(
mdata,
0.084416719,
variable="Radiative Forcing|Solar",
region="World",
year=2500,
)
def test_load_sample_dat_effective_radiative_forcings():
test_file = "SAMPLE_MIDYEAR_EFFECTIVERADFORCING.DAT"
mdata = MAGICCData(join(TEST_DATA_DIR, test_file))
generic_mdata_tests(mdata)
assert (
mdata.metadata["content"]
== "annual average, global mean effective radiative forcing"
)
assert mdata.metadata["further info"] == "further info"
assert mdata.metadata["documentation"] == "doc info"
assert mdata.metadata["note"] == ["three lines", "of", "notes"]
assert (mdata["variable"].str.startswith("Effective Radiative Forcing")).all()
assert (mdata["region"] == "World").all()
assert (mdata["todo"] == "SET").all()
assert (mdata["scenario"] == "SCN32").all()
assert (mdata["unit"] == "W / m^2").all()
assert_mdata_value(
mdata, 0.0, variable="Effective Radiative Forcing", region="World", year=1765,
)
assert_mdata_value(
mdata,
0.084416719,
variable="Effective Radiative Forcing|Solar",
region="World",
year=2500,
)
@pytest.mark.parametrize(
"input, expected",
[
("rCp26", "rcp26"),
("rCp3pd", "rcp26"),
("rCp45", "rcp45"),
("rCp6", "rcp60"),
("rCp60", "rcp60"),
("rCp85", "rcp85"),
],
)
def test_generic_rcp_names(input, expected):
for tin in [input, input.upper(), input.lower()]:
        result = get_generic_rcp_name(tin)
assert result == expected
def test_generic_rcp_name_error():
tinput = "junk"
error_msg = re.escape("No generic name for input: {}".format(tinput))
with pytest.raises(ValueError, match=error_msg):
get_generic_rcp_name(tinput)
def test_load_cfg_with_magicc_input():
test_file = "MAGCFG_BULKPARAS.CFG"
expected_error_msg = (
r"^"
+ re.escape("MAGCCInput cannot read .CFG files like ")
+ r".*{}".format(test_file)
+ re.escape(", please use pymagicc.io.read_cfg_file")
+ r"$"
)
with pytest.raises(ValueError, match=expected_error_msg):
MAGICCData(join(MAGICC6_DIR, test_file))
def test_load_cfg():
cfg = read_cfg_file(join(MAGICC6_DIR, "MAGCFG_BULKPARAS.CFG"))
assert cfg["NML_BULKPARALIST"]["BULKOUT_NRUNS"] == 190
assert cfg["NML_BULKPARALIST"]["BULKOUT_N_INDICATORS"] == 323
assert cfg["NML_BULKPARALIST"]["BULKOUT_CRASHED_RESULTVALUE"] == -999.999
cfg = read_cfg_file(join(MAGICC6_DIR, "MAGCFG_DEFAULTALL_69.CFG"))
assert cfg["NML_YEARS"]["STARTYEAR"] == 1500
assert cfg["NML_YEARS"]["ENDYEAR"] == 4200
assert cfg["NML_YEARS"]["STEPSPERYEAR"] == 12
assert cfg["NML_ALLCFGS"]["RUNNAME"] == "RUN001"
assert cfg["NML_ALLCFGS"]["RUNDATE"] == "No Date specified."
assert cfg["NML_ALLCFGS"]["CO2_FEEDBACKFACTOR_GPP"] == 0.015
assert cfg["NML_ALLCFGS"]["CH4_INCL_CH4OX"] == 1
assert cfg["NML_ALLCFGS"]["CH4_S"] == -0.32
assert (
cfg["NML_ALLCFGS"]["FILE_MHALO_CONC"]
== "Mixing ratios WMO2002 version5b_A1.prn"
)
assert cfg["NML_ALLCFGS"]["GEN_RCPPLUSREGIONS2NH"] == [0.9, 1.0, 0.8, 0.6, 0.3, 0.9]
assert cfg["NML_ALLCFGS"]["MHALO_GWP"] == [
3800,
8100,
4800,
10000,
7370,
1400,
146,
1500,
725,
1800,
1890,
0,
5400,
1640,
5,
13,
]
assert cfg["NML_ALLCFGS"]["MHALO_CL_ATOMS"] == [
3,
2,
3,
2,
1,
4,
3,
1,
2,
1,
1,
0,
0,
0,
0,
1,
]
assert cfg["NML_ALLCFGS"]["MHALO_NAMES"] == [
"CFC_11",
"CFC_12",
"CFC_113",
"CFC_114",
"CFC_115",
"CARB_TET",
"MCF",
"HCFC_22",
"HCFC_141B",
"HCFC_142B",
"HALON1211",
"HALON1202",
"HALON1301",
"HALON2402",
"CH3BR",
"CH3CL",
]
assert cfg["NML_ALLCFGS"]["MHALO_FORMULA"] == [
"(CCL3F)",
"(CCL2F2)",
"(C2CL3F3)",
"(C2CL2F4)",
"(C2CLF5)",
"(CCL4)",
"(CH3CCL3)",
"(CHCLF2)",
"(CH3CCL2F)",
"(CH3CCLF2)",
"(CF2CLBR)",
"(CBR2F2)",
"(CF3BR)",
"((CF2BR)2)",
"(CH3BR)",
"(CH3CL)",
]
assert cfg["NML_ALLCFGS"]["RF_REGIONS_STRATOZ"] == [
-0.01189,
-0.02267,
-0.06251,
-0.24036,
]
@pytest.mark.xfail(reason="f90nml cannot handle / in namelist properly")
def test_load_cfg_with_slash_in_units():
cfg = read_cfg_file(join(TEST_DATA_DIR, "F90NML_BUG.CFG"))
assert cfg["THISFILE_SPECIFICATIONS"]["THISFILE_DATACOLUMNS"] == 4
assert cfg["THISFILE_SPECIFICATIONS"]["THISFILE_FIRSTYEAR"] == 1000
assert cfg["THISFILE_SPECIFICATIONS"]["THISFILE_LASTYEAR"] == 2006
assert cfg["THISFILE_SPECIFICATIONS"]["THISFILE_ANNUALSTEPS"] == 12
assert cfg["THISFILE_SPECIFICATIONS"]["THISFILE_FIRSTDATAROW"] == 21
# this fails
assert cfg["THISFILE_SPECIFICATIONS"]["THISFILE_UNITS"] == "W / m^2"
@pytest.mark.parametrize(
"test_file",
[
join(TEST_OUT_DIR, "DAT_SURFACE_TEMP.OUT"),
join(TEST_DATA_DIR, "out_quoted_units", "DAT_SURFACE_TEMP.OUT"),
],
)
def test_load_out(test_file):
mdata = MAGICCData(test_file)
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "2018-09-23 18:33"
assert (
mdata.metadata["magicc-version"]
== "6.8.01 BETA, 7th July 2012 - live.magicc.org"
)
assert "__MAGICC 6.X DATA OUTPUT FILE__" in mdata.metadata["header"]
assert (mdata["todo"] == "not_relevant").all()
assert (mdata["unit"] == "K").all()
assert (mdata["variable"] == "Surface Temperature").all()
assert_mdata_value(
mdata,
0.0079979091,
variable="Surface Temperature",
region="World",
year=1767,
unit="K",
)
assert_mdata_value(
mdata,
-0.022702952,
variable="Surface Temperature",
region="World",
year=1965,
unit="K",
)
assert_mdata_value(
mdata,
0.010526585,
variable="Surface Temperature",
region="World|Northern Hemisphere|Ocean",
year=1769,
unit="K",
)
assert_mdata_value(
mdata,
-0.25062424,
variable="Surface Temperature",
region="World|Southern Hemisphere|Ocean",
year=1820,
unit="K",
)
assert_mdata_value(
mdata,
1.8515042,
variable="Surface Temperature",
region="World|Northern Hemisphere|Land",
year=2093,
unit="K",
)
assert_mdata_value(
mdata,
0,
variable="Surface Temperature",
region="World|Southern Hemisphere|Land",
year=1765,
unit="K",
)
def test_load_out_emis():
mdata = MAGICCData(join(TEST_OUT_DIR, "DAT_BCB_EMIS.OUT"))
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "2018-09-23 18:33"
assert (
mdata.metadata["magicc-version"]
== "6.8.01 BETA, 7th July 2012 - live.magicc.org"
)
assert "__MAGICC 6.X DATA OUTPUT FILE__" in mdata.metadata["header"]
assert (mdata["todo"] == "not_relevant").all()
assert (mdata["unit"] == "Mt BC / yr").all()
assert (mdata["variable"] == "Emissions|BC|MAGICC AFOLU").all()
assert_mdata_value(
mdata,
0,
variable="Emissions|BC|MAGICC AFOLU",
region="World",
year=1765,
unit="Mt BC / yr",
)
assert_mdata_value(
mdata,
2.0025816,
variable="Emissions|BC|MAGICC AFOLU",
region="World",
year=1965,
unit="Mt BC / yr",
)
assert_mdata_value(
mdata,
0.0,
variable="Emissions|BC|MAGICC AFOLU",
region="World|Northern Hemisphere|Ocean",
year=1769,
unit="Mt BC / yr",
)
assert_mdata_value(
mdata,
0.0,
variable="Emissions|BC|MAGICC AFOLU",
region="World|Southern Hemisphere|Ocean",
year=1820,
unit="Mt BC / yr",
)
assert_mdata_value(
mdata,
0.71504927,
variable="Emissions|BC|MAGICC AFOLU",
region="World|Northern Hemisphere|Land",
year=2093,
unit="Mt BC / yr",
)
assert_mdata_value(
mdata,
0.48390716,
variable="Emissions|BC|MAGICC AFOLU",
region="World|Southern Hemisphere|Land",
year=2100,
unit="Mt BC / yr",
)
def test_load_out_slash_and_caret_in_rf_units():
mdata = MAGICCData(join(TEST_OUT_DIR, "DAT_SOXB_RF.OUT"))
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "2018-09-23 18:33"
assert (
mdata.metadata["magicc-version"]
== "6.8.01 BETA, 7th July 2012 - live.magicc.org"
)
assert "__MAGICC 6.X DATA OUTPUT FILE__" in mdata.metadata["header"]
assert (mdata["todo"] == "not_relevant").all()
assert (mdata["unit"] == "W / m^2").all()
assert (
mdata["variable"] == "Radiative Forcing|Aerosols|Direct Effect|SOx|MAGICC AFOLU"
).all()
assert_mdata_value(
mdata,
-0.00025099784,
variable="Radiative Forcing|Aerosols|Direct Effect|SOx|MAGICC AFOLU",
region="World",
year=1767,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
-0.032466593,
variable="Radiative Forcing|Aerosols|Direct Effect|SOx|MAGICC AFOLU",
region="World",
year=1965,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
-0.0014779559,
variable="Radiative Forcing|Aerosols|Direct Effect|SOx|MAGICC AFOLU",
region="World|Northern Hemisphere|Ocean",
year=1769,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
-0.024316933,
variable="Radiative Forcing|Aerosols|Direct Effect|SOx|MAGICC AFOLU",
region="World|Northern Hemisphere|Land",
year=2093,
# unit="W / m^2"
)
assert_mdata_value(
mdata,
0,
variable="Radiative Forcing|Aerosols|Direct Effect|SOx|MAGICC AFOLU",
region="World|Southern Hemisphere|Land",
year=1765,
# unit="W / m^2"
)
def test_load_out_slash_and_caret_in_heat_content_units():
mdata = MAGICCData(join(TEST_OUT_DIR, "DAT_HEATCONTENT_AGGREG_DEPTH1.OUT"))
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "2018-09-23 18:33"
assert (
mdata.metadata["magicc-version"]
== "6.8.01 BETA, 7th July 2012 - live.magicc.org"
)
assert "__MAGICC 6.X DATA OUTPUT FILE__" in mdata.metadata["header"]
assert (mdata["todo"] == "not_relevant").all()
assert (mdata["unit"] == "10^22J").all()
assert (mdata["variable"] == "Heat Content|Ocean|Depth 1").all()
assert_mdata_value(
mdata,
0.046263236,
variable="Heat Content|Ocean|Depth 1",
region="World",
year=1767,
# unit="10^22J"
)
assert_mdata_value(
mdata,
3.4193050,
variable="Heat Content|Ocean|Depth 1",
region="World",
year=1965,
# unit="10^22J"
)
assert_mdata_value(
mdata,
0.067484257,
variable="Heat Content|Ocean|Depth 1",
region="World|Northern Hemisphere|Ocean",
year=1769,
# unit="10^22J"
)
assert_mdata_value(
mdata,
-4.2688102,
variable="Heat Content|Ocean|Depth 1",
region="World|Southern Hemisphere|Ocean",
year=1820,
# unit="10^22J"
)
assert_mdata_value(
mdata,
0,
variable="Heat Content|Ocean|Depth 1",
region="World|Northern Hemisphere|Land",
year=2093,
# unit="10^22J"
)
assert_mdata_value(
mdata,
0,
variable="Heat Content|Ocean|Depth 1",
region="World|Southern Hemisphere|Land",
year=1765,
# unit="10^22J"
)
def test_load_out_ocean_layers():
mdata = MAGICCData(join(TEST_OUT_DIR, "TEMP_OCEANLAYERS.OUT"))
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "2018-09-23 18:33"
assert (
mdata.metadata["magicc-version"]
== "6.8.01 BETA, 7th July 2012 - live.magicc.org"
)
assert (
"__MAGICC 6.X TEMP_OCEANLAYERS DATA OUTPUT FILE__" in mdata.metadata["header"]
)
assert (mdata["todo"] == "not_relevant").all()
assert (mdata["unit"] == "K").all()
assert_mdata_value(
mdata,
0,
variable="Ocean Temperature|Layer 1",
region="World",
year=1765,
unit="K",
)
assert_mdata_value(
mdata,
0.10679213,
variable="Ocean Temperature|Layer 3",
region="World",
year=1973,
unit="K",
)
assert_mdata_value(
mdata,
0.13890633,
variable="Ocean Temperature|Layer 50",
region="World",
year=2100,
unit="K",
)
def test_load_out_ocean_layers_hemisphere():
mdata = MAGICCData(join(TEST_OUT_DIR, "TEMP_OCEANLAYERS_NH.OUT"))
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "2018-09-23 18:33"
assert (
mdata.metadata["magicc-version"]
== "6.8.01 BETA, 7th July 2012 - live.magicc.org"
)
assert (
"__MAGICC 6.X TEMP_OCEANLAYERS DATA OUTPUT FILE__" in mdata.metadata["header"]
)
assert (mdata["todo"] == "not_relevant").all()
assert (mdata["unit"] == "K").all()
assert_mdata_value(
mdata,
0,
variable="Ocean Temperature|Layer 1",
region="World|Northern Hemisphere|Ocean",
year=1765,
unit="K",
)
assert_mdata_value(
mdata,
0.10679213,
variable="Ocean Temperature|Layer 3",
region="World|Northern Hemisphere|Ocean",
year=1973,
unit="K",
)
assert_mdata_value(
mdata,
0.13890633,
variable="Ocean Temperature|Layer 50",
region="World|Northern Hemisphere|Ocean",
year=2100,
unit="K",
)
def test_load_out_inverseemis():
mdata = MAGICCData(join(TEST_OUT_DIR, "INVERSEEMIS.OUT"))
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "2018-09-23 18:33"
assert (
mdata.metadata["magicc-version"]
== "6.8.01 BETA, 7th July 2012 - live.magicc.org"
)
assert "__MAGICC 6.X MISC DATA OUTPUT FILE__" in mdata.metadata["header"]
assert (mdata["todo"] == "not_relevant").all()
assert (mdata["region"] == "World").all()
assert_mdata_value(
mdata,
0.01369638,
variable="Inverse Emissions|CO2|MAGICC Fossil and Industrial",
region="World",
year=1765,
unit="Gt C / yr",
)
assert_mdata_value(
mdata,
2.6233208,
variable="Inverse Emissions|N2O",
region="World",
year=1770,
unit="Mt N2ON / yr",
)
assert_mdata_value(
mdata,
155.86567,
variable="Inverse Emissions|CH3Br",
region="World",
year=2099,
unit="kt CH3Br / yr",
)
assert_mdata_value(
mdata,
0.0,
variable="Inverse Emissions|CH3Cl",
region="World",
year=2100,
unit="kt CH3Cl / yr",
)
def test_load_out_co2pf_emis():
mdata = MAGICCData(join(TEST_OUT_DIR, "DAT_CO2PF_EMIS.OUT"))
generic_mdata_tests(mdata)
assert mdata.metadata["date"] == "2018-09-23 18:33"
assert (
mdata.metadata["magicc-version"]
== "6.8.01 BETA, 7th July 2012 - live.magicc.org"
)
assert "__MAGICC 6.X DATA OUTPUT FILE__" in mdata.metadata["header"]
assert (mdata["todo"] == "not_relevant").all()
assert (mdata["unit"] == "Gt C / yr").all()
assert (
mdata["variable"]
== "Net Land to Atmosphere Flux|CO2|Earth System Feedbacks|Permafrost"
).all()
assert_mdata_value(mdata, 0, region="World", year=1765)
assert_mdata_value(mdata, 0, region="World|Northern Hemisphere|Land", year=1765)
assert_mdata_value(mdata, 0, region="World|Northern Hemisphere|Ocean", year=1765)
assert_mdata_value(mdata, 0, region="World|Southern Hemisphere|Land", year=1765)
assert_mdata_value(mdata, 0, region="World|Southern Hemisphere|Ocean", year=1765)
def test_load_parameters_out_with_magicc_input():
test_file = "PARAMETERS.OUT"
expected_error_msg = (
r"^"
+ re.escape(
"MAGCCInput cannot read PARAMETERS.OUT as it is a config style file"
)
+ re.escape(", please use pymagicc.io.read_cfg_file")
+ r"$"
)
with pytest.raises(ValueError, match=expected_error_msg):
MAGICCData(join(TEST_OUT_DIR, test_file))
xfail_msg = (
"Output config files have heaps of spurious spaces, need to decide what to do "
"about this. If we strip them, we give the illustion that they're usable, "
"which they're not really..."
)
@pytest.mark.xfail(reason=xfail_msg)
def test_load_parameters_out():
cfg = read_cfg_file(join(TEST_OUT_DIR, "PARAMETERS.OUT"))
assert cfg["NML_YEARS"]["STARTYEAR"] == 1765
assert cfg["NML_YEARS"]["STEPSPERYEAR"] == 12
assert cfg["NML_ALLCFGS"]["PATHNAME_OUTFILES"] == "../out/"
assert cfg["NML_ALLCFGS"]["CO2_FEEDBACKFACTOR_GPP"] == 0.01070037
assert cfg["NML_OUTPUTCFGS"]["RF_INTEFFICACY_CH4"] == 0.987766458278542
assert cfg["NML_OUTPUTCFGS"]["RF_REGIONS_AER_DIR"] == [
-0.819497642538182,
-0.804446767198558,
-0.02718573381799450,
-0.01260873055223082,
]
def test_filter():
mdata = MAGICCData(join(MAGICC6_DIR, "HISTRCP_CO2I_EMIS.IN"))
tvariable = "Emissions|CO2|MAGICC Fossil and Industrial"
tregion = "World|R5LAM"
result = mdata.filter(variable=tvariable, region=tregion).timeseries()
mask = np.array(
(mdata.meta["variable"] == tvariable) & (mdata.meta["region"] == tregion)
)
expected = mdata.timeseries()[mask]
pd.testing.assert_frame_equal(
result, expected, check_names=False, check_like=True, check_column_type=False
)
def test_incomplete_filepath():
with pytest.raises(FileNotFoundError):
MAGICCData(join("/incomplete/dir/name"))
with pytest.raises(FileNotFoundError):
MAGICCData(join("RCP26.SCEN"))
def test_invalid_name():
with pytest.raises(FileNotFoundError):
MAGICCData(join("/tmp", "MYNONEXISTANT.IN"))
def test_header_metadata():
m = _Reader("test")
assert m.process_header("lkhdsljdkjflkjndlkjlkndjgf") == {
"header": "lkhdsljdkjflkjndlkjlkndjgf"
}
assert m.process_header("") == {}
assert m.process_header("Data: Average emissions per year") == {
"data": "Average emissions per year"
}
assert m.process_header(
"DATA: Historical landuse BC (BCB) Emissions (HISTRCP_BCB_EMIS) "
) == {"data": "Historical landuse BC (BCB) Emissions (HISTRCP_BCB_EMIS)"}
assert m.process_header(
"CONTACT: RCP 3-PD (IMAGE): Detlef van Vuuren (detlef.vanvuuren@pbl.nl); RCP 4.5 (MiniCAM): Allison Thomson (Allison.Thomson@pnl.gov); RCP 6.0 (AIM): Toshihiko Masui (masui@nies.go.jp); RCP 8.5 (MESSAGE): Keywan Riahi (riahi@iiasa.ac.at); Base year emissions inventories: Steve Smith (ssmith@pnl.gov) and Jean-Francois Lamarque (Jean-Francois.Lamarque@noaa.gov) "
) == {
"contact": "RCP 3-PD (IMAGE): Detlef van Vuuren (detlef.vanvuuren@pbl.nl); RCP 4.5 (MiniCAM): Allison Thomson (Allison.Thomson@pnl.gov); RCP 6.0 (AIM): Toshihiko Masui (masui@nies.go.jp); RCP 8.5 (MESSAGE): Keywan Riahi (riahi@iiasa.ac.at); Base year emissions inventories: Steve Smith (ssmith@pnl.gov) and Jean-Francois Lamarque (Jean-Francois.Lamarque@noaa.gov)"
}
assert m.process_header(
"DATE: 26/11/2009 11:29:06; MAGICC-VERSION: 6.3.09, 25 November 2009"
) == {"date": "26/11/2009 11:29:06; MAGICC-VERSION: 6.3.09, 25 November 2009"}
assert m.process_header(
"Compiled by: Zebedee Nicholls, Australian-German Climate & Energy College"
) == {"compiled by": "Zebedee Nicholls, Australian-German Climate & Energy College"}
def test_magicc_input_init():
# must init with data
with pytest.raises(TypeError):
MAGICCData()
def test_magicc_input_init_preserves_columns():
tmodel = "test model"
tscenario = "test scenario"
tclimate_model = "test climate model"
test_df = pd.DataFrame(
{
"model": tmodel,
"scenario": tscenario,
"climate_model": tclimate_model,
"variable": "Surface Temperature",
"region": "World|R5REF",
"unit": "K",
"time": 2012,
"value": 0.9,
},
index=[0],
)
mdata = MAGICCData(test_df)
assert (mdata["model"] == tmodel).all()
assert (mdata["scenario"] == tscenario).all()
assert (mdata["climate_model"] == tclimate_model).all()
def test_set_lines_and_find_nml():
reader = _Reader("test")
with pytest.raises(FileNotFoundError):
reader._set_lines_and_find_nml()
test_file = join(TEST_DATA_DIR, "HISTSSP_CO2I_EMIS.IN")
assert isfile(test_file)
reader = _Reader(test_file)
reader._set_lines_and_find_nml()
with open(test_file, "r", encoding="utf-8", newline="\n") as f:
assert reader.lines == f.readlines()
@pytest.mark.parametrize(
"test_filepath, expected_variable",
[
("/test/filename/paths/HISTABCD_CH4_CONC.IN", "CH4_CONC"),
("test/filename.OUT", None),
],
)
def test_conc_in_reader_get_variable_from_filepath(test_filepath, expected_variable):
conc_reader = _ConcInReader(test_filepath)
if expected_variable is None:
expected_message = re.escape(
"Cannot determine variable from filepath: {}".format(test_filepath)
)
with pytest.raises(ValueError, match=expected_message):
conc_reader._get_variable_from_filepath()
else:
assert conc_reader._get_variable_from_filepath() == expected_variable
ALL_FILE_TYPES = pytest.mark.parametrize(
"magicc_version, starting_fpath, starting_fname, confusing_metadata, old_namelist",
[
(6, MAGICC6_DIR, "HISTRCP_CO2I_EMIS.IN", False, False),
(6, MAGICC6_DIR, "HISTRCP_N2OI_EMIS.IN", False, False),
(6, MAGICC6_DIR, "MARLAND_CO2I_EMIS.IN", True, True), # weird units handling
(6, MAGICC6_DIR, "HISTRCP_CO2_CONC.IN", False, False),
(
6,
MAGICC6_DIR,
"HISTRCP_HFC245fa_CONC.IN",
True,
True,
), # weird units handling
(
6,
MAGICC6_DIR,
"HISTRCP_HFC43-10_CONC.IN",
True,
True,
), # weird units handling
(7, TEST_DATA_DIR, "HISTSSP_CO2I_EMIS.IN", False, False),
(6, MAGICC6_DIR, "MIXED_NOXI_OT.IN", True, True), # weird units handling
(6, MAGICC6_DIR, "GISS_BCB_RF.IN", True, True), # weird units handling
(6, MAGICC6_DIR, "HISTRCP_SOLAR_RF.IN", True, True), # weird units handling
(
6,
MAGICC6_DIR,
"RCPODS_WMO2006_Emissions_A1.prn",
True,
True,
), # weird units/gas handling
(
6,
MAGICC6_DIR,
"RCPODS_WMO2006_MixingRatios_A1.prn",
True,
True,
), # weird units and notes handling
(6, MAGICC6_DIR, "RCP26.SCEN", True, True), # metadata all over the place
(6, MAGICC6_DIR, "SRESA1B.SCEN", True, True), # metadata all over the place
(7, TEST_DATA_DIR, "TESTSCEN7.SCEN7", False, False),
(7, TEST_DATA_DIR, "MAG_FORMAT_SAMPLE.MAG", False, False),
(7, TEST_DATA_DIR, "MAG_FORMAT_SAMPLE_TWO.MAG", False, False),
],
)
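# Shared parametrisation for the round-trip tests below. ``confusing_metadata``
# flags files whose metadata cannot be compared exactly after a round trip, and
# ``old_namelist`` flags files whose namelist layout differs from what the
# current writers produce, so the namelist comparison is skipped for them
# (both interpretations inferred from how the flags are used in the tests).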
@ALL_FILE_TYPES
def test_in_file_read_write_functionally_identical(
magicc_version,
starting_fpath,
starting_fname,
confusing_metadata,
old_namelist,
temp_dir,
):
mi_writer = MAGICCData(join(starting_fpath, starting_fname))
mi_writer.write(join(temp_dir, starting_fname), magicc_version=magicc_version)
mi_written = MAGICCData(join(temp_dir, starting_fname))
mi_initial = MAGICCData(join(starting_fpath, starting_fname))
if not old_namelist:
nml_written = f90nml.read(join(temp_dir, starting_fname))
nml_initial = f90nml.read(join(starting_fpath, starting_fname))
assert sorted(nml_written["thisfile_specifications"]) == sorted(
nml_initial["thisfile_specifications"]
)
    # TODO: work out how to test files with confusing metadata; the writers
    # should fix the metadata, but how to test that this has been done as
    # intended is the next step
if not confusing_metadata:
for key_written, value_written in mi_written.metadata.items():
if key_written == "header":
# we don't care about headers matching exactly as they're never used
# by MAGICC
continue
try:
assert value_written.strip() == mi_initial.metadata[key_written].strip()
except: # noqa
assert value_written == mi_initial.metadata[key_written]
pd.testing.assert_frame_equal(
mi_written.timeseries().sort_index(),
mi_initial.timeseries().sort_index(),
check_column_type=False,
)
@ALL_FILE_TYPES
def test_nans_stripped_before_writing(
magicc_version,
starting_fpath,
starting_fname,
confusing_metadata,
old_namelist,
temp_dir,
):
mi_writer = MAGICCData(join(starting_fpath, starting_fname))
nan_idx = mi_writer.shape[1] // 2
nan_timestep = mi_writer["time"].iloc[nan_idx]
assert nan_timestep in mi_writer["time"].values
mi_writer.values[:, nan_idx] = np.nan
mi_writer.write(join(temp_dir, starting_fname), magicc_version=magicc_version)
mi_written = MAGICCData(join(temp_dir, starting_fname))
assert nan_timestep not in mi_written["time"].values
@ALL_FILE_TYPES
def test_raises_if_nans_not_uniform(
magicc_version,
starting_fpath,
starting_fname,
confusing_metadata,
old_namelist,
temp_dir,
):
mi_writer = MAGICCData(join(starting_fpath, starting_fname))
if mi_writer.shape[0] == 1:
pytest.skip("Only one timeseries so can't create mismatch")
nan_row = mi_writer.shape[0] // 2
nan_col = mi_writer.shape[1] // 2
mi_writer.values[nan_row, nan_col] = np.nan
error_msg = re.escape(
"Your data contains timesteps where some values are nan whilst others "
"are not. This will not work in MAGICC."
)
with pytest.raises(AssertionError, match=error_msg):
mi_writer.write(join(temp_dir, starting_fname), magicc_version=magicc_version)
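# MAGICC gas codes forming an emissions set that get_special_scen_code
# recognises as valid for .SCEN files (dropping any entry makes the set
# unrecognised, see the parametrisation below).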
emissions_valid = [
"CO2I",
"CO2B",
"CH4",
"N2O",
"SOX",
"CO",
"NMVOC",
"NOX",
"BC",
"OC",
"NH3",
"CF4",
"C2F6",
"C6F14",
"HFC23",
"HFC32",
"HFC4310",
"HFC125",
"HFC134A",
"HFC143A",
"HFC227EA",
"HFC245FA",
"SF6",
]
global_only = ["WORLD"]
sres_regions = ["WORLD", "OECD90", "REF", "ASIA", "ALM"]
rcp_regions = ["WORLD", "R5OECD", "R5REF", "R5ASIA", "R5MAF", "R5LAM"]
# the fact these are valid for SCEN files but not for other data files is
# unbelievably confusing
rcp_regions_plus_bunkers = [
"WORLD",
"R5OECD",
"R5REF",
"R5ASIA",
"R5MAF",
"R5LAM",
"BUNKERS",
]
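# The expected integers below (11, 21, 31, 41) are the "special scen codes"
# that get_special_scen_code should return for each region/gas combination;
# presumably these codes are written into .SCEN headers so MAGICC knows which
# region set and gas list a file contains (an inference from the
# parametrisation, not taken from the MAGICC documentation).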
@pytest.mark.parametrize(
"regions, emissions, expected",
[
(global_only, emissions_valid, 11),
(sres_regions, emissions_valid, 21),
(sres_regions[1:], emissions_valid, "unrecognised regions"),
(sres_regions, emissions_valid[1:], "unrecognised emissions"),
(rcp_regions, emissions_valid, 31),
(rcp_regions_plus_bunkers, emissions_valid, 41),
],
)
def test_get_scen_special_code(regions, emissions, expected):
if expected == "unrecognised regions":
error_msg = re.escape(
"Could not determine scen special code for regions {}".format(regions)
)
with pytest.raises(ValueError, match=error_msg):
get_special_scen_code(regions, emissions)
elif expected == "unrecognised emissions":
error_msg = re.escape(
"Could not determine scen special code for emissions {}".format(emissions)
)
with pytest.raises(ValueError, match=error_msg):
get_special_scen_code(regions, emissions)
else:
result = get_special_scen_code(regions, emissions)
assert result == expected
@pytest.mark.parametrize("file_to_read", [f for f in listdir(MAGICC6_DIR)])
def test_can_read_all_files_in_magicc6_in_dir(file_to_read):
if file_to_read.endswith((".exe", ".MON")):
pass
elif file_to_read.endswith(".CFG"):
read_cfg_file(join(MAGICC6_DIR, file_to_read))
else:
mdata = MAGICCData(join(MAGICC6_DIR, file_to_read))
        # make sure that no emissions units are read in as bare mass units,
        # e.g. the units of BC should be Mt BC / time, not simply Mt
assert not mdata["unit"].isin(["kt", "Mt"]).any()
@pytest.mark.parametrize("file_to_read", [f for f in TEST_OUT_FILES])
def test_can_read_all_valid_files_in_magicc6_out_dir(file_to_read):
    if file_to_read.endswith("PARAMETERS.OUT"):
read_cfg_file(join(TEST_OUT_DIR, file_to_read))
else:
for p in INVALID_OUT_FILES:
if re.match(p, file_to_read):
return
mdata = MAGICCData(join(TEST_OUT_DIR, file_to_read))
        # make sure that no emissions units are read in as bare mass units,
        # e.g. the units of BC should be Mt BC / time, not simply Mt
assert not mdata["unit"].isin(["kt", "Mt"]).any()
@pytest.mark.parametrize("file_to_read", [f for f in TEST_OUT_FILES])
def test_cant_read_all_invalid_files_in_magicc6_out_dir(file_to_read):
valid_filepath = True
for p in INVALID_OUT_FILES:
if re.match(p, file_to_read):
valid_filepath = False
if valid_filepath:
return
if ("SUBANN" in file_to_read) or ("VOLCANIC_RF" in file_to_read):
error_msg = (
r"^.*"
+ re.escape(": Only annual files can currently be processed")
+ r".*$"
)
with pytest.raises(InvalidTemporalResError, match=error_msg):
MAGICCData(join(TEST_OUT_DIR, file_to_read))
else:
error_msg = (
r"^.*"
+ re.escape(
"is in an odd format for which we will never provide a reader/writer"
)
+ r".*$"
)
with pytest.raises(NoReaderWriterError, match=error_msg):
MAGICCData(join(TEST_OUT_DIR, file_to_read))
@pytest.mark.parametrize(
"file_to_read",
[f for f in listdir(TEST_OUT_DIR) if f.endswith("BINOUT") and f.startswith("DAT_")],
)
def test_bin_and_ascii_equal(file_to_read):
try:
mdata_bin = MAGICCData(join(TEST_OUT_DIR, file_to_read))
except InvalidTemporalResError:
# Some BINOUT files are on a subannual time scale and cannot be read (yet)
return
assert (mdata_bin["unit"] == "unknown").all()
assert (mdata_bin["todo"] == "not_relevant").all()
mdata_ascii = MAGICCData(join(TEST_OUT_DIR, file_to_read.replace("BINOUT", "OUT")))
mdata_bin["unit"] = mdata_ascii.get_unique_meta("unit", no_duplicates=True)
assert_scmdf_almost_equal(mdata_ascii, mdata_bin, check_ts_names=False)
@patch("pymagicc.io._read_metadata_and_df")
@pytest.mark.parametrize("inplace", (True, False))
def test_magicc_data_append(mock_read_metadata_and_df, inplace):
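    # _read_metadata_and_df is mocked, so nothing is read from disk here; the
    # test only checks how append combines the metadata and timeseries from
    # two successive "reads"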
tfilepath = "mocked/out/here.txt"
tindex_yr = 2000
tmetadata_init = {"mock": 12, "mock overwrite": "written here"}
tdf_init_df = pd.DataFrame([[2.0, 1.2, 7.9]], index=[tindex_yr])
tdf_init_columns = {
"model": ["a"],
"scenario": ["b"],
"climate_model": ["c"],
"region": ["World|ASIA"],
"variable": ["GE", "GE|Coal", "GE|Gas"],
"unit": ["J/y"],
}
tdf_init = tdf_init_df.T
tdf_init.index = pd.MultiIndex.from_product(
tdf_init_columns.values(), names=tdf_init_columns.keys()
)
tmetadata_append = {"mock 12": 7, "mock overwrite": "written here too"}
tdf_append_df = pd.DataFrame([[-6.0, 3.2, 7.1]], index=[tindex_yr])
tdf_append_columns = {
"model": ["d"],
"scenario": ["e"],
"climate_model": ["f"],
"region": ["World|ASIA"],
"variable": ["GE", "GE|Coal", "GE|Gas"],
"unit": ["J/y"],
}
tdf_append = tdf_append_df.T
tdf_append.index = pd.MultiIndex.from_product(
tdf_append_columns.values(), names=tdf_append_columns.keys()
)
mock_read_metadata_and_df.return_value = (
tmetadata_init,
tdf_init_df,
tdf_init_columns,
)
mdata = MAGICCData("mocked")
mock_read_metadata_and_df.return_value = (
tmetadata_append,
tdf_append_df,
tdf_append_columns,
)
if inplace:
mdata.append(tfilepath, inplace=inplace)
res = mdata
else:
original = deepcopy(mdata)
res = mdata.append(tfilepath, inplace=inplace)
pd.testing.assert_frame_equal(
original.timeseries(), mdata.timeseries(), check_column_type=False
)
assert original.metadata == mdata.metadata
mock_read_metadata_and_df.assert_called_with(tfilepath)
assert isinstance(res, MAGICCData)
expected_metadata = deepcopy(tmetadata_init)
for k, v in tmetadata_append.items():
if k not in expected_metadata:
expected_metadata[k] = v
assert res.metadata == expected_metadata
expected = pd.concat([tdf_init, tdf_append])
expected.columns = pd.Index([dt.datetime(tindex_yr, 1, 1, 0, 0, 0)], dtype="object")
pd.testing.assert_frame_equal(
res.timeseries(),
expected.sort_index().reorder_levels(mdata.timeseries().index.names),
check_like=True,
check_column_type=False,
)
@patch("pymagicc.io.pull_cfg_from_parameters_out")
@patch("pymagicc.io.read_cfg_file")
def test_pull_cfg_from_parameters_out_file(
mock_read_cfg_file, mock_pull_cfg_from_parameters_out
):
tfile = "here/there/PARAMETERS.OUT"
tparas_out = {"nml_allcfgs": {"para_1": 3}}
tnamelist_out = f90nml.Namelist(tparas_out)
mock_read_cfg_file.return_value = tparas_out
mock_pull_cfg_from_parameters_out.return_value = tnamelist_out
result = pull_cfg_from_parameters_out_file(tfile)
assert result == tnamelist_out
mock_read_cfg_file.assert_called_with(tfile)
mock_pull_cfg_from_parameters_out.assert_called_with(
tparas_out, namelist_to_read="nml_allcfgs"
)
result = pull_cfg_from_parameters_out_file(tfile, namelist_to_read="nml_test")
assert result == tnamelist_out
mock_read_cfg_file.assert_called_with(tfile)
mock_pull_cfg_from_parameters_out.assert_called_with(
tparas_out, namelist_to_read="nml_test"
)
def test_pull_cfg_from_parameters_out():
tparas_out = {
"nml_allcfgs": {
"para_1": 3,
"para_2": " string here ",
"para_3": [1, 2, 3, 4],
"para_4": [" as sld ", "abc", "\x00"],
"file_tuningmodel": "MAGTUNE_ABC.CFG",
"file_tuningmodel_2": "MAGTUNE_DEF.CFG",
"file_tuningmodel_3": "MAGTUNE_JAKF.CFG",
},
"nml_outcfgs": {
"para_1": -13,
"para_2": "string here too",
"para_3": [-0.1, 0, 0.1, 0.2],
"para_4": ["tabs sldx ", " abc ", "\x00", "\x00", " "],
},
}
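    # as reflected in ``expected`` below, pulling the config strips stray
    # whitespace and null bytes from string values and blanks out the
    # FILE_TUNINGMODEL* entries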
result = pull_cfg_from_parameters_out(tparas_out)
expected = f90nml.Namelist(
{
"nml_allcfgs": {
"para_1": 3,
"para_2": "string here",
"para_3": [1, 2, 3, 4],
"para_4": ["as sld", "abc"],
"file_tuningmodel": "",
"file_tuningmodel_2": "",
"file_tuningmodel_3": "",
}
}
)
for key, value in result.items():
for sub_key, sub_value in value.items():
assert sub_value == expected[key][sub_key]
result = pull_cfg_from_parameters_out(
f90nml.Namelist(tparas_out), namelist_to_read="nml_outcfgs"
)
expected = f90nml.Namelist(
{
"nml_outcfgs": {
"para_1": -13,
"para_2": "string here too",
"para_3": [-0.1, 0, 0.1, 0.2],
"para_4": ["tabs sldx", "abc"],
}
}
)
for key, value in result.items():
for sub_key, sub_value in value.items():
assert sub_value == expected[key][sub_key]
def test_write_emis_in_unrecognised_region_error(temp_dir, writing_base_emissions):
tregions = ["R5REF", "R5OECD", "R5LAM", "R5ASIA", "R5MAF"]
writing_base_emissions["region"] = tregions
writing_base_emissions["variable"] = "Emissions|CO2"
writing_base_emissions.metadata = {"header": "TODO: fix error message"}
error_msg = re.escape(
"Are all of your regions OpenSCM regions? I don't "
"recognise: {}".format(sorted(tregions))
)
with pytest.raises(ValueError, match=error_msg):
writing_base_emissions.write(
join(temp_dir, "TMP_CO2_EMIS.IN"), magicc_version=6
)
def test_write_unrecognised_region_combination_error(temp_dir, writing_base_emissions):
writing_base_emissions["variable"] = "Emissions|CO2"
error_msg = re.escape(
"Unrecognised regions, they must be part of "
"pymagicc.definitions.DATTYPE_REGIONMODE_REGIONS. If that doesn't make "
"sense, please raise an issue at "
"https://github.com/openscm/pymagicc/issues"
)
assert isinstance(pymagicc.definitions.DATTYPE_REGIONMODE_REGIONS, pd.DataFrame)
with pytest.raises(ValueError, match=error_msg):
writing_base_emissions.write(
join(temp_dir, "TMP_CO2_EMIS.IN"), magicc_version=6
)
def test_write_no_header_error(temp_dir, writing_base_emissions):
writing_base_emissions["variable"] = "Emissions|CO2"
tregions = [
"World|{}".format(r) for r in ["R5REF", "R5OECD", "R5LAM", "R5ASIA", "R5MAF"]
]
writing_base_emissions["region"] = tregions
writing_base_emissions["variable"] = "Emissions|CO2"
writing_base_emissions["unit"] = "GtC / yr"
error_msg = re.escape('Please provide a file header in ``self.metadata["header"]``')
with pytest.raises(KeyError, match=error_msg):
writing_base_emissions.write(
join(temp_dir, "TMP_CO2_EMIS.IN"), magicc_version=6
)
# integration test
def test_write_emis_in(temp_dir, update_expected_file, writing_base_emissions):
tregions = [
"World|{}".format(r) for r in ["R5REF", "R5OECD", "R5LAM", "R5ASIA", "R5MAF"]
]
writing_base_emissions["region"] = tregions
writing_base_emissions["variable"] = "Emissions|CO2"
writing_base_emissions["unit"] = "GtC / yr"
res = join(temp_dir, "TMP_CO2_EMIS.IN")
writing_base_emissions.metadata = {"header": "Test CO2 Emissions file"}
writing_base_emissions.write(res, magicc_version=6)
expected = join(EXPECTED_FILES_DIR, "EXPECTED_CO2_EMIS.IN")
run_writing_comparison(res, expected, update=update_expected_file)
def test_write_emis_in_variable_name_error(temp_dir, writing_base_emissions):
tregions = [
"World|{}".format(r) for r in ["R5REF", "R5OECD", "R5LAM", "R5ASIA", "R5MAF"]
]
writing_base_emissions["region"] = tregions
writing_base_emissions["variable"] = "Emissions|CO2|MAGICC AFOLU"
writing_base_emissions.metadata = {"header": "Test misnamed CO2 Emissions file"}
error_msg = re.escape(
"Your filename variable, Emissions|CO2, does not match the data "
"variable, Emissions|CO2|MAGICC AFOLU"
)
with pytest.raises(ValueError, match=error_msg):
writing_base_emissions.write(
join(temp_dir, "TMP_CO2_EMIS.IN"), magicc_version=6
)
# integration test
def test_write_temp_in(temp_dir, update_expected_file, writing_base):
    # spacing almost certainly doesn't matter for fourbox data; the check can
    # always be made more restrictive in future if required
tregions = [
"World|{}|{}".format(r, sr)
for r in ["Southern Hemisphere", "Northern Hemisphere"]
for sr in ["Ocean", "Land"]
]
writing_base["region"] = tregions
writing_base["variable"] = "Surface Temperature"
writing_base["unit"] = "K"
res = join(temp_dir, "TMP_SURFACE_TEMP.IN")
writing_base.metadata = {"header": "Test Surface temperature input file"}
writing_base.write(res, magicc_version=6)
expected = join(EXPECTED_FILES_DIR, "EXPECTED_SURFACE_TEMP.IN")
run_writing_comparison(res, expected, update=update_expected_file)
def test_write_temp_in_variable_name_error(temp_dir, writing_base):
tregions = [
"World|{}|{}".format(r, sr)
for r in ["Northern Hemisphere", "Southern Hemisphere"]
for sr in ["Ocean", "Land"]
]
writing_base["region"] = tregions
writing_base["variable"] = "Ocean Temperature"
writing_base.metadata = {"header": "Test misnamed Surface temperature file"}
error_msg = re.escape(
"Your filename variable, Surface Temperature, does not match the data "
"variable, Ocean Temperature"
)
with pytest.raises(ValueError, match=error_msg):
writing_base.write(join(temp_dir, "TMP_SURFACE_TEMP.IN"), magicc_version=6)
def test_surface_temp_in_reader():
mdata = MAGICCData(join(EXPECTED_FILES_DIR, "EXPECTED_SURFACE_TEMP.IN"))
generic_mdata_tests(mdata)
assert "Test Surface temperature input file" in mdata.metadata["header"]
assert (mdata["todo"] == "SET").all()
assert (mdata["unit"] == "K").all()
assert (mdata["variable"] == "Surface Temperature").all()
assert_mdata_value(mdata, 6, region="World|Northern Hemisphere|Ocean", year=1996)
assert_mdata_value(mdata, 3, region="World|Northern Hemisphere|Land", year=1995)
assert_mdata_value(mdata, 4, region="World|Southern Hemisphere|Ocean", year=1996)
assert_mdata_value(mdata, 5, region="World|Southern Hemisphere|Land", year=1996)
def test_prn_wrong_region_error():
base = (
MAGICCData(
join(EXPECTED_FILES_DIR, "EXPECTED_RCPODS_WMO2006_MixingRatios_A1.prn")
)
.timeseries()
.reset_index()
)
other = base.copy()
other["region"] = "World|R5ASIA"
merged = pd.concat([base, other]).reset_index(drop=True)
writer = MAGICCData(merged)
error_msg = re.escape(".prn files can only contain the 'World' region")
with pytest.raises(AssertionError, match=error_msg):
writer.write("Unused.prn", magicc_version=6)
def test_prn_wrong_unit_error():
base = (
MAGICCData(
join(EXPECTED_FILES_DIR, "EXPECTED_RCPODS_WMO2006_MixingRatios_A1.prn")
)
.timeseries()
.reset_index()
)
base.loc[base["variable"] == "Atmospheric Concentrations|CFC11", "unit"] = "ppb"
writer = MAGICCData(base)
writer.metadata = {"header": "not used"}
error_msg = re.escape(
"prn file units should either all be 'ppt' or all be 't [gas] / yr', "
"units of ['ppb', 'ppt'] do not meet this requirement"
)
with pytest.raises(ValueError, match=error_msg):
writer.write("Unused.prn", magicc_version=6)
def test_compact_out_reader():
mdata = MAGICCData(join(TEST_DATA_DIR, "COMPACT.OUT"))
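    # the COMPACT output read here contains multiple runs; run_id and the
    # varied config parameters (e.g. core_climatesensitivity) become extra
    # index columns on the data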
generic_mdata_tests(
mdata,
extra_index_cols={
"run_id": int,
"core_climatesensitivity": float,
"rf_regions_ch4": tuple,
},
)
assert (mdata["unit"] == "unknown").all()
assert mdata.filter(run_id=0)["rf_regions_ch4"].unique().tolist() == [
(0.3, 0.4, 0.2, 0.1),
]
assert mdata.filter(run_id=1)["rf_regions_ch4"].unique().tolist() == [
(0.1, 0.8, 0.0, 0.0),
]
assert mdata.filter(run_id=0)["core_climatesensitivity"].unique().tolist() == [
2.5,
]
assert mdata.filter(run_id=1)["core_climatesensitivity"].unique().tolist() == [
3.0,
]
assert_mdata_value(mdata, 277.9355, region="World", year=1765, run_id=0)
assert_mdata_value(mdata, 277.9355, region="World", year=1765, run_id=1)
assert_mdata_value(
mdata, 355.8137, region="World|Northern Hemisphere|Ocean", year=1990, run_id=0
)
assert_mdata_value(
mdata, 365.8137, region="World|Northern Hemisphere|Ocean", year=1990, run_id=1
)
def test_compact_binout_reader():
mdata = MAGICCData(join(TEST_DATA_DIR, "COMPACT.BINOUT"))
generic_mdata_tests(
mdata, extra_index_cols={"run_id": int, "core_climatesensitivity": float,},
)
assert (mdata["unit"] == "unknown").all()
assert mdata.filter(run_id=0)["core_climatesensitivity"].unique().tolist() == [
2.5,
]
assert mdata.filter(run_id=1)["core_climatesensitivity"].unique().tolist() == [
3.0,
]
assert_mdata_value(mdata, 277.9355, region="World", year=1765, run_id=0)
assert_mdata_value(mdata, 277.9355, region="World", year=1765, run_id=1)
assert_mdata_value(
mdata, 355.8137, region="World|Northern Hemisphere|Ocean", year=1990, run_id=0
)
assert_mdata_value(
mdata, 355.8137, region="World|Northern Hemisphere|Ocean", year=1990, run_id=1
)
def test_compact_out_writer():
test_name = "TEST_COMPACT.OUT"
expected_message = re.escape(
"A writer for `^.*COMPACT\\.OUT$` files is not yet implemented"
)
with pytest.raises(NotImplementedError, match=expected_message):
determine_tool(join(TEST_DATA_DIR, test_name), "writer")
def test_compact_binout_writer():
test_name = "TEST_COMPACT.BINOUT"
expected_message = re.escape(
"A writer for `^.*COMPACT\\.BINOUT$` files is not yet implemented"
)
with pytest.raises(NotImplementedError, match=expected_message):
determine_tool(join(TEST_DATA_DIR, test_name), "writer")
# integration test
@pytest.mark.parametrize(
"starting_file",
[
"EXPECTED_RCPODS_WMO2006_Emissions_A1.prn",
"EXPECTED_RCPODS_WMO2006_MixingRatios_A1.prn",
"EXPECTED_RCP26.SCEN",
"EXPECTED_HISTRCP_NOXI_EMIS.IN",
"EXPECTED_HISTRCP_HFC43-10_CONC.IN",
"EXPECTED_HISTRCP85_SOLAR_RF.IN",
"EXPECTED_GISS_BCI_OT.IN",
"EXPECTED_HISTSSP_CO2I_EMIS.IN",
],
)
def test_writing_identical(temp_dir, update_expected_file, starting_file):
"""
    Test that io writes files with the correct order and spacing.
See docs (MAGICC file conventions) for notes about why files may differ from
files in the ``pymagicc/MAGICC6/run`` directory.
"""
base = join(EXPECTED_FILES_DIR, starting_file)
writing_base = MAGICCData(base)
    # shuffle the timeseries (row) order, thank you https://stackoverflow.com/a/34879805
writer = MAGICCData(writing_base.timeseries().sample(frac=1))
res = join(temp_dir, starting_file)
writer.metadata = deepcopy(writing_base.metadata)
writer.write(res, magicc_version=6)
run_writing_comparison(res, base, update=update_expected_file)
# integration test
@pytest.mark.parametrize(
"starting_file,magicc_version",
[
("EXPECTED_MAGICC7_EMISSIONS.DAT", 7),
("EXPECTED_MAGICC7_MIDYEAR_CONCENTRATIONS.DAT", 7),
("EXPECTED_MAGICC7_MIDYEAR_RADFORCING.DAT", 7),
("EXPECTED_MAGICC7_MIDYEAR_EFFECTIVERADFORCING.DAT", 7),
("EXPECTED_MAGICC6_EMISSIONS.DAT", 6),
("EXPECTED_MAGICC6_MIDYEAR_CONCENTRATIONS.DAT", 6),
("EXPECTED_MAGICC6_MIDYEAR_RADFORCING.DAT", 6),
],
)
@pytest.mark.parametrize("add_extra_data", [True, False])
def test_writing_identical_rcpdat(
temp_dir, update_expected_file, starting_file, magicc_version, add_extra_data
):
base = join(EXPECTED_FILES_DIR, starting_file)
writing_base = MAGICCData(base)
# shuffle column order, thank you https://stackoverflow.com/a/34879805
writer = MAGICCData(writing_base.timeseries().sample(frac=1))
if add_extra_data:
tmp = writer.timeseries()
tmp = pd.concat([tmp, tmp.iloc[0, :].to_frame().T], axis=0)
tmp.iloc[-1, :] = np.arange(tmp.shape[1]) / tmp.shape[1]
tmp = tmp.reset_index()
tmp["variable"].iloc[-1] = "Surface Temperature"
tmp["unit"].iloc[-1] = "K"
writer = MAGICCData(tmp)
res = join(temp_dir, starting_file)
writer.metadata = deepcopy(writing_base.metadata)
warning_msg = re.escape(
"The `.DAT` format is an old, custom format. We strongly recommend using "
"the `ScmRun` format instead (just call `.to_csv()`). Our `.DAT` "
"writers are not super well tested so the error messages are likely "
"to be cryptic. If you need help, please raise an issue at "
"https://github.com/openscm/pymagicc/issues"
)
with pytest.warns(Warning, match=warning_msg):
writer.write(res, magicc_version=magicc_version)
def strip_out_date_line(inpath, outpath):
with open(inpath, "r") as f:
base_lines = f.read().split("\n")
found_date = False
base_lines_no_date = []
for line in base_lines:
if line.startswith("DATE:"):
found_date = True
continue
base_lines_no_date.append(line)
assert found_date
with open(outpath, "w") as f:
f.write("\n".join(base_lines_no_date))
if not update_expected_file:
# strip out date line before comparing as it won't be the same
base_comp = join(temp_dir, "BASE_{}".format(starting_file))
strip_out_date_line(base, base_comp)
strip_out_date_line(res, res)
run_writing_comparison(res, base_comp, update=update_expected_file)
else:
run_writing_comparison(res, base, update=update_expected_file)
def test_writing_erf_magicc6_error():
writer = MAGICCData(
join(EXPECTED_FILES_DIR, "EXPECTED_MAGICC7_MIDYEAR_EFFECTIVERADFORCING.DAT")
)
error_msg = re.escape("MAGICC6 does not output effective radiative forcing")
with pytest.raises(ValueError, match=error_msg):
writer.write(
"IGNORED_MAGICC6_MIDYEAR_EFFECTIVERADFORCING.DAT", magicc_version=6
)
def test_mag_writer(temp_dir, writing_base_mag):
file_to_write = join(temp_dir, "TEST_NAME.MAG")
writing_base_mag.filter(year=2100, keep=False).write(
file_to_write, magicc_version=7
)
with open(file_to_write) as f:
content = f.read()
assert "THISFILE_REGIONMODE = 'NONE'" in content
assert "THISFILE_ANNUALSTEPS = 0" in content
assert "other info: checking time point handling" in content
assert "Test mag file" in content
assert "LAND" in content
assert "OCEAN" in content
res = MAGICCData(file_to_write)
assert (
res.filter(
region="World|Northern Hemisphere", year=2099, month=1
).values.squeeze()
== 1
)
assert (
res.filter(
region="World|Southern Hemisphere", year=2099, month=11
).values.squeeze()
== 52
)
assert res.filter(region="World|Land", year=2099, month=5).values.squeeze() == 23
assert res.filter(region="World|Ocean", year=2099, month=12).values.squeeze() == 59
assert res.filter(region="World", year=2101, month=3).values.squeeze() == 130
@pytest.mark.parametrize("n_writes", [1, 2])
def test_mag_writer_ar6_region(temp_dir, writing_base_mag, n_writes):
file_to_write = join(temp_dir, "TEST_AR6_LINK.MAG")
region_map = {
"World": "World",
"World|Northern Hemisphere": "World|AR6|ARO",
"World|Southern Hemisphere": "World|AR6|NEN",
"World|Land": "World|Land",
"World|Ocean": "World|Ocean",
}
writing_base_mag["region"] = writing_base_mag["region"].map(region_map)
writing_base_mag.filter(year=2100, keep=False).write(
file_to_write, magicc_version=7
)
for _ in range(1, n_writes):
read = MAGICCData(file_to_write)
read.write(file_to_write, magicc_version=7)
with open(file_to_write) as f:
content = f.read()
    assert (
        "For more information on the AR6 regions (including mapping the "
        "abbrevations to their full names), see: "
        "https://github.com/SantanderMetGroup/ATLAS/tree/master/reference-regions, "
        "specifically https://github.com/SantanderMetGroup/ATLAS/blob/master/reference-regions/IPCC-WGI-reference-regions-v4_coordinates.csv "
        "(paper is at https://doi.org/10.5194/essd-2019-258)"
    ) in content
assert "World|AR6-ARO" not in content
assert "AR6-ARO" in content
assert "AR6-NEN" in content
assert (
len(
[
line
for line in content.split("\n")
if "doi.org/10.5194/essd-2019-258" in line
]
)
== 1
)
def _alter_to_timeseriestype(inscmdf, timeseriestype):
if timeseriestype == "POINT_START_YEAR":
return inscmdf.interpolate(
target_times=[
dt.datetime(y, 1, 1)
for y in set(inscmdf["time"].apply(lambda x: x.year))
],
)
if timeseriestype == "POINT_MID_YEAR":
return inscmdf.interpolate(
target_times=[
dt.datetime(y, 7, 1)
for y in set(inscmdf["time"].apply(lambda x: x.year))
],
)
if timeseriestype == "POINT_END_YEAR":
return inscmdf.interpolate(
target_times=[
dt.datetime(y, 12, 31)
for y in set(inscmdf["time"].apply(lambda x: x.year))
],
)
if timeseriestype == "AVERAGE_YEAR_START_YEAR":
return inscmdf.time_mean("AS")
if timeseriestype == "AVERAGE_YEAR_MID_YEAR":
return inscmdf.time_mean("AC")
if timeseriestype == "AVERAGE_YEAR_END_YEAR":
return inscmdf.time_mean("A")
if timeseriestype == "MONTHLY":
return inscmdf
raise AssertionError("shouldn't get here")
_TIMESERIESTYPES = (
"POINT_START_YEAR",
"POINT_MID_YEAR",
"POINT_END_YEAR",
"AVERAGE_YEAR_START_YEAR",
"AVERAGE_YEAR_MID_YEAR",
"AVERAGE_YEAR_END_YEAR",
"MONTHLY",
)
@pytest.mark.parametrize("timeseriestype", _TIMESERIESTYPES)
def test_mag_writer_timeseriestypes(temp_dir, writing_base_mag, timeseriestype):
file_to_write = join(temp_dir, "TEST_NAME.MAG")
writing_base_mag = _alter_to_timeseriestype(writing_base_mag, timeseriestype)
writing_base_mag = writing_base_mag.timeseries().reset_index()
writing_base_mag["climate_model"] = "unspecified"
writing_base_mag["scenario"] = "unspecified"
writing_base_mag["unit"] = writing_base_mag["unit"].apply(
lambda x: x.replace("/", "per")
)
writing_base_mag = MAGICCData(writing_base_mag)
writing_base_mag.metadata = {"timeseriestype": timeseriestype, "header": "shh"}
writing_base_mag.write(file_to_write, magicc_version=7)
with open(file_to_write) as f:
content = f.read()
assert "THISFILE_ANNUALSTEPS = 1" in content
assert "THISFILE_TIMESERIESTYPE = '{}'".format(timeseriestype) in content
res = MAGICCData(file_to_write)
unit = writing_base_mag.get_unique_meta("unit", no_duplicates=True)
writing_base_mag["unit"] = unit.replace("per", "/")
exp_ts = writing_base_mag.timeseries()
if timeseriestype == "MONTHLY":
res_ts = res.timeseries()
# month test is overly sensitive so do column by column
for res_col, exp_col in zip(res_ts.columns, exp_ts.columns):
assert res_col.year == exp_col.year
assert res_col.month == exp_col.month
assert np.abs(res_col.day - exp_col.day) <= 1
res["time"] = writing_base_mag["time"]
assert_scmdf_almost_equal(res, writing_base_mag, check_ts_names=False)
@pytest.mark.parametrize("timeseriestype", _TIMESERIESTYPES)
def test_mag_writer_timeseriestypes_data_mismatch_error(
temp_dir, writing_base_mag, timeseriestype
):
file_to_write = join(temp_dir, "TEST_NAME.MAG")
writing_base_mag = MAGICCData(
_alter_to_timeseriestype(
writing_base_mag,
"POINT_MID_YEAR"
if timeseriestype not in ("POINT_MID_YEAR", "AVERAGE_YEAR_MID_YEAR")
else "AVERAGE_YEAR_START_YEAR",
)
)
writing_base_mag.metadata = {"timeseriestype": timeseriestype, "header": "shh"}
error_msg = re.escape(
"timeseriestype ({}) doesn't match data".format(timeseriestype)
)
with pytest.raises(ValueError, match=error_msg):
writing_base_mag.write(file_to_write, magicc_version=7)
@pytest.mark.parametrize("timeseriestype", ("junk",))
def test_mag_writer_timeseriestypes_unrecognised_timeseriestype_error(
temp_dir, writing_base_mag, timeseriestype
):
file_to_write = join(temp_dir, "TEST_NAME.MAG")
writing_base_mag.metadata["timeseriestype"] = timeseriestype
error_msg = re.escape("Unrecognised timeseriestype: {}".format(timeseriestype))
with pytest.raises(ValueError, match=error_msg):
writing_base_mag.write(file_to_write, magicc_version=7)
def test_mag_writer_valid_region_mode(temp_dir, writing_base):
tregions = [
"World|{}|{}".format(r, sr)
for r in ["Northern Hemisphere", "Southern Hemisphere"]
for sr in ["Ocean", "Land"]
]
writing_base["region"] = tregions
writing_base["variable"] = "Ocean Temperature"
writing_base.metadata = {
"header": "Test mag file where regionmode is picked up",
"timeseriestype": "AVERAGE_YEAR_START_YEAR",
}
file_to_write = join(temp_dir, "TEST_NAME.MAG")
writing_base.write(file_to_write, magicc_version=7)
with open(file_to_write) as f:
content = f.read()
assert "THISFILE_REGIONMODE = 'FOURBOX'" in content
def test_mag_writer_unrecognised_region_warning(temp_dir, writing_base):
tregions = [
"World|{}|{}".format(r, sr)
for r in ["Northern Hemisphere", "Southern Hemisphare"]
for sr in ["Ocean", "Land"]
]
writing_base["region"] = tregions
writing_base["variable"] = "Ocean Temperature"
writing_base.metadata = {
"header": "Test mag file where regions are misnamed",
"timeseriestype": "AVERAGE_YEAR_START_YEAR",
}
warn_msg = re.compile(
r"^Not abbreviating regions, could not find abbreviation for "
r"\['WORLD\|Southern Hemisphare\|.*', "
r"'WORLD\|Southern Hemisphare\|.*'\]$"
)
with warnings.catch_warnings(record=True) as warn_unrecognised_region:
writing_base.write(join(temp_dir, "TEST_NAME.MAG"), magicc_version=7)
assert len(warn_unrecognised_region) == 1
assert warn_msg.match(str(warn_unrecognised_region[0].message))
def test_mag_writer_error_if_magicc6(temp_dir, writing_base):
tregions = [
"World|{}|{}".format(r, sr)
for r in ["Northern Hemisphere", "Southern Hemisphere"]
for sr in ["Ocean", "Land"]
]
writing_base["region"] = tregions
writing_base["variable"] = "Ocean Temperature"
writing_base.metadata = {"header": "MAGICC6 error test"}
error_msg = re.escape(".MAG files are not MAGICC6 compatible")
with pytest.raises(ValueError, match=error_msg):
writing_base.write(join(temp_dir, "TEST_NAME.MAG"), magicc_version=6)
def test_mag_reader():
mdata = MAGICCData(join(TEST_DATA_DIR, "MAG_FORMAT_SAMPLE.MAG"))
generic_mdata_tests(mdata)
assert "Date crunched: DATESTRING" in mdata.metadata["header"]
assert (
"Affiliation: Climate & Energy College, The University of Melbourne"
in mdata.metadata["header"]
)
assert mdata.metadata["key"] == "value"
assert (
mdata.metadata["original source"] == "somewhere over the rainbow of 125 moons"
)
assert mdata.metadata["length"] == "53 furlongs"
assert "region abbreviations" in mdata.metadata
assert (mdata["unit"] == "K").all()
assert (mdata["variable"] == "Surface Temperature").all()
assert_mdata_value(
mdata, 0, region="World|Northern Hemisphere|Land", year=1910, month=1
)
assert_mdata_value(
mdata, 3, region="World|Southern Hemisphere|Land", year=1910, month=8
)
assert_mdata_value(
mdata, 5, region="World|Southern Hemisphere|Ocean", year=1911, month=2
)
assert_mdata_value(
mdata, 12, region="World|North Atlantic Ocean", year=1911, month=6
)
assert_mdata_value(mdata, 9, region="World|El Nino N3.4", year=1911, month=7)
@pytest.mark.parametrize(
"test_file",
(
join(TEST_DATA_DIR, "MAG_FORMAT_SAMPLE.MAG"),
join(TEST_DATA_DIR, "MAG_FORMAT_SAMPLE_TWO.MAG"),
join(TEST_DATA_DIR, "MAG_FORMAT_SAMPLE_LONG_DATA_SALT.MAG"),
),
)
def test_mag_reader_metadata_only(benchmark, test_file):
result = benchmark(read_mag_file_metadata, filepath=test_file)
checker = MAGICCData(test_file)
assert result == checker.metadata
def test_mag_reader_metadata_only_wrong_file_type():
with pytest.raises(ValueError, match=re.escape("File must be a `.MAG` file")):
read_mag_file_metadata("CO2I_EMIS.IN")
@pytest.mark.parametrize(
"broken_file",
(
join(TEST_DATA_DIR, "MAG_FORMAT_MISSING_NAMELIST_END.MAG"),
join(TEST_DATA_DIR, "MAG_FORMAT_WRONG_NAMELIST_NAME.MAG"),
),
)
def test_mag_reader_metadata_only_missing_namelist(broken_file):
with pytest.raises(ValueError, match=re.escape("Could not find namelist")):
read_mag_file_metadata(broken_file)
def test_mag_writer_default_header(temp_dir, writing_base):
tregions = [
"World|{}|{}".format(r, sr)
for r in ["Northern Hemisphere", "Southern Hemisphere"]
for sr in ["Ocean", "Land"]
]
writing_base["region"] = tregions
writing_base["variable"] = "Ocean Temperature"
writing_base.metadata = {"timeseriestype": "AVERAGE_YEAR_START_YEAR"}
write_file = join(temp_dir, "TEST_NAME.MAG")
default_header_lines = [re.compile("Date: .*"), re.compile("Writer: pymagicc v.*")]
warn_msg = (
"No header detected, it will be automatically added. We recommend setting "
"`self.metadata['header']` to ensure your files have the desired metadata."
)
with warnings.catch_warnings(record=True) as warn_no_header:
writing_base.write(write_file, magicc_version=7)
assert len(warn_no_header) == 1
assert str(warn_no_header[0].message) == warn_msg
with open(write_file) as f:
content = f.read().split("\n")
for d in default_header_lines:
found_line = False
for line in content:
if d.match(line):
found_line = True
break
if not found_line:
assert False, "Missing header line: {}".format(d)
@pytest.mark.parametrize(
"valid,time_axis",
[
(True, [dt.datetime(y, m, 1) for y in range(2000, 2031) for m in range(1, 13)]),
(True, [dt.datetime(y, m, 2) for y in range(2000, 2031) for m in range(1, 13)]),
(
False,
[dt.datetime(y, m, 3) for y in range(2000, 2031) for m in range(1, 13)],
),
(
False,
[dt.datetime(y, m, 13) for y in range(2000, 2031) for m in range(1, 13)],
),
(
False,
[dt.datetime(y, m, 14) for y in range(2000, 2031) for m in range(1, 13)],
),
(
True,
[dt.datetime(y, m, 15) for y in range(2000, 2031) for m in range(1, 13)],
),
(
True,
[dt.datetime(y, m, 16) for y in range(2000, 2031) for m in range(1, 13)],
),
(
True,
[dt.datetime(y, m, 17) for y in range(2000, 2031) for m in range(1, 13)],
),
(
False,
[dt.datetime(y, m, 18) for y in range(2000, 2031) for m in range(1, 13)],
),
(
False,
[dt.datetime(y, m, 28) for y in range(2000, 2031) for m in range(1, 13)],
),
],
)
def test_timestamp_handling(valid, time_axis, temp_dir):
writing_base = MAGICCData(
np.arange(0, 40).reshape(10, 4),
index=[dt.datetime(y, 1, 16) for y in range(1000, 3000, 200)],
columns={
"variable": "Atmospheric Concentrations|CH4",
"region": [
"World|Northern Hemisphere|Land",
"World|Northern Hemisphere|Ocean",
"World|Southern Hemisphere|Land",
"World|Southern Hemisphere|Ocean",
],
"unit": "ppb",
"model": "None",
"scenario": "test",
"todo": "SET",
"parameter_type": "point",
},
)
writing_base = writing_base.interpolate(time_axis)
writing_base.metadata["header"] = "Test timestamp handling file"
if valid:
out_file = join(temp_dir, "TEST_CH4_CONC.IN")
writing_base.write(out_file, magicc_version=7)
with open(out_file) as f:
content = f.read()
assert "THISFILE_FIRSTYEAR = {}".format(time_axis[0].year) in content
assert "THISFILE_LASTYEAR = {}".format(time_axis[-1].year) in content
else:
error_msg = re.escape(
"Your timestamps don't appear to be middle or start of month"
)
with pytest.raises(ValueError, match=error_msg):
writing_base.write(join(temp_dir, "TEST_CH4_CONC.IN"), magicc_version=7)
def test_to_int_value_error():
error_msg = re.escape("invalid values `{}`".format([4.5, 6.5]))
with pytest.raises(ValueError, match=error_msg):
to_int(np.array([1, 3, 4.5, 6.5, 7.0, 8]))
def test_to_int_type_error():
inp = [1, 3, 4.5, 6.5, 7.0, 8]
error_msg = re.escape(
"For our own sanity, this method only works with np.ndarray input. x is "
"type: {}".format(type(inp))
)
with pytest.raises(TypeError, match=error_msg):
to_int(inp)
@pytest.mark.parametrize(
"start_list,expected",
(
(["CORE_CLIMATESENSITIVITY", "RUN_ID"], {}),
(
[
"CORE_CLIMATESENSITIVITY",
"RF_BBAER_DIR_WM2",
"OUT_ZERO_TEMP_PERIOD_1",
"OUT_ZERO_TEMP_PERIOD_2",
],
{
"OUT_ZERO_TEMP_PERIOD": [
"OUT_ZERO_TEMP_PERIOD_1",
"OUT_ZERO_TEMP_PERIOD_2",
]
},
),
(
[
"RUN_ID",
"RF_REGIONS_CH4OXSTRATH2O_2",
"RF_REGIONS_CH4OXSTRATH2O_1",
"RF_REGIONS_CH4OXSTRATH2O_3",
"RF_REGIONS_CH4OXSTRATH2O_4",
"RF_REGIONS_CIRRUS_1",
"RF_REGIONS_CIRRUS_2",
"RF_REGIONS_CIRRUS_3",
"RF_REGIONS_CIRRUS_4",
"SRF_FACTOR_LANDUSE",
],
{
"RF_REGIONS_CH4OXSTRATH2O": [
"RF_REGIONS_CH4OXSTRATH2O_1",
"RF_REGIONS_CH4OXSTRATH2O_2",
"RF_REGIONS_CH4OXSTRATH2O_3",
"RF_REGIONS_CH4OXSTRATH2O_4",
],
"RF_REGIONS_CIRRUS": [
"RF_REGIONS_CIRRUS_1",
"RF_REGIONS_CIRRUS_2",
"RF_REGIONS_CIRRUS_3",
"RF_REGIONS_CIRRUS_4",
],
},
),
(
[
"RUN_ID",
"FGAS_H_ATOMS_1",
"FGAS_H_ATOMS_2",
"FGAS_H_ATOMS_4",
"FGAS_H_ATOMS_4",
"FGAS_H_ATOMS_5",
"FGAS_H_ATOMS_6",
"FGAS_H_ATOMS_7",
"SRF_FACTOR_LANDUSE",
],
{
"FGAS_H_ATOMS": [
"FGAS_H_ATOMS_1",
"FGAS_H_ATOMS_2",
"FGAS_H_ATOMS_4",
"FGAS_H_ATOMS_4",
"FGAS_H_ATOMS_5",
"FGAS_H_ATOMS_6",
"FGAS_H_ATOMS_7",
]
},
),
(["CORE_CLIMATESENSITIVITY", "FILE_TUNINGMODEL", "FILE_TUNINGMODEL_2"], {}),
(["CORE_CLIMATESENSITIVITY", "OUT_KEYDATA_1", "OUT_KEYDATA_2"], {}),
(
[
"CORE_CLIMATESENSITIVITY",
"FILE_EMISSCEN",
"FILE_EMISSCEN_2",
"OUT_KEYDATA_1",
"OUT_KEYDATA_2",
],
{},
),
),
)
@pytest.mark.parametrize("case", ("upper", "lower", "capital"))
def test_find_parameter_groups(start_list, expected, case):
if case == "upper":
start_list = [v.upper() for v in start_list]
expected = {k.upper(): [vv.upper() for vv in v] for k, v in expected.items()}
elif case == "lower":
start_list = [v.lower() for v in start_list]
expected = {k.lower(): [vv.lower() for vv in v] for k, v in expected.items()}
elif case == "capital":
start_list = [v.capitalize() for v in start_list]
expected = {
k.capitalize(): [vv.capitalize() for vv in v] for k, v in expected.items()
}
else:
raise NotImplementedError()
assert find_parameter_groups(start_list) == expected
@pytest.mark.parametrize(
"inp_file",
[
join(TEST_DATA_DIR, "bin_legacy", "DAT_SURFACE_TEMP.BINOUT"),
join(TEST_DATA_DIR, "bin_v2", "DAT_SURFACE_TEMP.BINOUT"),
],
)
def test_binary_reader_fourbox(inp_file):
res = MAGICCData(inp_file)
assert res.get_unique_meta("variable", no_duplicates=True) == "Surface Temperature"
assert set(res.get_unique_meta("region")) == {
"World",
"World|Northern Hemisphere|Land",
"World|Southern Hemisphere|Land",
"World|Northern Hemisphere|Ocean",
"World|Southern Hemisphere|Ocean",
}
@pytest.mark.parametrize(
"inp_file",
[
join(TEST_DATA_DIR, "bin_legacy", "DAT_CO2_AIR2LAND_FLUX.BINOUT"),
join(TEST_DATA_DIR, "bin_v2", "DAT_CO2_AIR2LAND_FLUX.BINOUT"),
],
)
def test_binary_reader_global_only(inp_file):
res = MAGICCData(inp_file)
assert (
res.get_unique_meta("variable", no_duplicates=True)
== "Net Atmosphere to Land Flux|CO2"
)
assert res.get_unique_meta("region", no_duplicates=True) == "World"
def test_binary_reader_different_versions():
res_legacy = MAGICCData(
join(TEST_DATA_DIR, "bin_legacy", "DAT_SURFACE_TEMP.BINOUT")
)
res_v2 = MAGICCData(join(TEST_DATA_DIR, "bin_v2", "DAT_SURFACE_TEMP.BINOUT"))
assert res_v2.get_unique_meta("unit", True) == "K"
assert res_legacy.get_unique_meta("unit", True) == "unknown"
meta_columns = res_v2.meta.columns.drop("unit")
pd.testing.assert_frame_equal(
res_v2.timeseries(meta_columns),
res_legacy.timeseries(meta_columns),
check_column_type=False,
)
def test_binary_reader_different_versions_global_only():
res_legacy = MAGICCData(
join(TEST_DATA_DIR, "bin_legacy", "DAT_CO2_AIR2LAND_FLUX.BINOUT")
)
res_v2 = MAGICCData(join(TEST_DATA_DIR, "bin_v2", "DAT_CO2_AIR2LAND_FLUX.BINOUT"))
assert res_v2.get_unique_meta("unit", True) == "GtC / yr"
assert res_legacy.get_unique_meta("unit", True) == "unknown"
meta_columns = res_v2.meta.columns.drop("unit")
pd.testing.assert_frame_equal(
res_v2.timeseries(meta_columns),
res_legacy.timeseries(meta_columns),
check_column_type=False,
)
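# Usage sketch of the io API exercised above (illustration only, not an extra
# test; it relies on the same TEST_DATA_DIR sample file as test_mag_reader):
# read_mag_file_metadata parses just the header/namelist, which is much cheaper
# than a full MAGICCData load but yields the same metadata.
def _example_read_mag_metadata():
    sample = join(TEST_DATA_DIR, "MAG_FORMAT_SAMPLE.MAG")
    metadata_only = read_mag_file_metadata(sample)  # header and namelist, no data block
    full = MAGICCData(sample)  # full parse: timeseries plus metadata
    assert metadata_only == full.metadata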
|
from django.urls import path
from django.contrib.auth.views import LogoutView
from . import views
urlpatterns = [
path('', views.sign_up, name='user_sign'),
path('dashboard', views.user_dashboard, name='user_dashboard'),
path('login', views.sign_in, name='login_user'),
path('logout', LogoutView.as_view(next_page='/'), name='logout'),
path('users', views.list_registered_users, name='system_users'),
path('activate/user/<int:user_id>', views.user_activate, name='activate_user'),
path('deactivate/user/<int:user_id>', views.user_deactivate, name='deactivate_user'),
]
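# Usage sketch (not part of this urlconf): every route above is named, so other
# code can resolve paths with django.urls.reverse (or the {% url %} template
# tag) instead of hard-coding them; the exact prefix depends on where this
# module is included in the project's ROOT_URLCONF.
#
#   from django.urls import reverse
#   login_url = reverse('login_user')
#   activate_url = reverse('activate_user', args=[some_user.id])  # some_user is hypothetical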
|
from __future__ import print_function
import time
import weakref
from resumeback import send_self, StrongGeneratorWrapper, GeneratorWrapper
from . import defer, State
def test_constructors():
def func():
yield # pragma: no cover
generator = func()
wrappers = [StrongGeneratorWrapper(generator),
GeneratorWrapper(weakref.ref(generator))]
for wrapper in wrappers:
assert type(wrapper.weak_generator) is weakref.ref
assert wrapper.weak_generator() is generator
assert wrapper.catch_stopiteration is True
assert wrapper.debug is False
def test_equal():
def func():
yield # pragma: no cover
generator = func()
assert (StrongGeneratorWrapper(generator)
== StrongGeneratorWrapper(generator))
assert (GeneratorWrapper(weakref.ref(generator))
== GeneratorWrapper(weakref.ref(generator)))
assert (StrongGeneratorWrapper(generator)
!= GeneratorWrapper(weakref.ref(generator)))
def test_with_weak_ref():
    # Also checks preservation of the weak_generator object
ts = State()
# Note that `weakref.ref(obj) is weakref.ref(obj)`
# always holds true,
# unless you specify a callback parameter
# for either of the constructors.
# However, even then they compare equal.
@send_self(finalize_callback=print)
def func(this):
thises = [
this,
this.with_weak_ref(),
this.with_strong_ref().with_weak_ref(),
this.with_strong_ref().with_strong_ref().with_weak_ref(),
this()()
]
comp_ref = GeneratorWrapper(weakref.ref(this.generator))
for i, that in enumerate(thises):
assert type(that) is GeneratorWrapper, i
assert that == this
assert that.weak_generator is this.weak_generator
assert comp_ref.weak_generator is not that.weak_generator
assert comp_ref.weak_generator == that.weak_generator
ts.run = True
if False: # Turn into a generator function
yield
func()
assert ts.run
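# A small illustration of the weakref note above (standard library behaviour,
# not a resumeback feature; sketch only, not an additional test case): CPython
# reuses the weak reference object when no callback is given, while references
# created with callbacks are distinct objects that still compare equal as long
# as the referent is alive.
def _weakref_identity_illustration():
    class Dummy(object):
        pass
    obj = Dummy()
    assert weakref.ref(obj) is weakref.ref(obj)  # cached, same object
    ref_a = weakref.ref(obj, lambda _r: None)
    ref_b = weakref.ref(obj, lambda _r: None)
    assert ref_a is not ref_b  # callbacks force new reference objects
    assert ref_a == ref_b  # but they still compare equal while obj is alive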
def test_with_strong_ref():
ts = State()
# See test_with_weak_ref
@send_self(finalize_callback=print)
def func(this):
this_strong = this.with_strong_ref()
thises = [
this_strong,
this_strong.with_strong_ref(),
this_strong.with_weak_ref().with_strong_ref(),
this_strong.with_weak_ref().with_weak_ref().with_strong_ref(),
this_strong()()
]
comp_ref = StrongGeneratorWrapper(this.generator)
for i, that in enumerate(thises):
assert type(that) is StrongGeneratorWrapper, i
assert that == this_strong
assert that.weak_generator is this.weak_generator
assert comp_ref.weak_generator is not that.weak_generator
assert comp_ref.weak_generator == that.weak_generator
del thises
del comp_ref
ts.run = True
if False: # Turn into a generator function
yield
func()
assert ts.run
def test_has_terminated_simple():
ts = State()
@send_self
def func(_):
ts.run = True
if False: # Turn into a generator function
yield
assert func().has_terminated()
assert ts.run
def test_has_terminated():
ts = State()
def cb(this):
assert not this.has_terminated()
this.send_wait(True)
@send_self
def func2(this):
assert not this.has_terminated()
ts.run = yield defer(cb, this, sleep=0)
yield
wrapper = func2()
time.sleep(0.1)
assert ts.run
assert not wrapper.has_terminated()
wrapper.next()
assert wrapper.has_terminated()
|
import os
import numpy as np
import torch
import torch.nn as nn
from .pu_net import PUNet
from ..drop_points import SORDefense
class DUPNet(nn.Module):
def __init__(self, sor_k=2, sor_alpha=1.1,
npoint=1024, up_ratio=4):
super(DUPNet, self).__init__()
self.npoint = npoint
self.sor = SORDefense(k=sor_k, alpha=sor_alpha)
self.pu_net = PUNet(npoint=self.npoint, up_ratio=up_ratio,
use_normal=False, use_bn=False, use_res=False)
def process_data(self, pc, npoint=None):
"""Process point cloud data to be suitable for
PU-Net input.
We do two things:
sample npoint or duplicate to npoint.
Args:
pc (torch.FloatTensor): list input, [(N_i, 3)] from SOR.
Need to pad or trim to [B, self.npoint, 3].
"""
if npoint is None:
npoint = self.npoint
B = len(pc)
proc_pc = torch.zeros((B, npoint, 3)).float().cuda()
for pc_idx in range(B):
one_pc = pc[pc_idx]
# [N_i, 3]
N = len(one_pc)
if N > npoint:
                # randomly subsample npoint of the points
idx = np.random.choice(N, npoint, replace=False)
idx = torch.from_numpy(idx).long().cuda()
one_pc = one_pc[idx]
elif N < npoint:
                # duplicate the whole cloud as many times as fits below npoint
duplicated_pc = one_pc
num = npoint // N - 1
for i in range(num):
duplicated_pc = torch.cat([
duplicated_pc, one_pc
], dim=0)
num = npoint - len(duplicated_pc)
                # randomly sample the remaining points needed to reach npoint
idx = np.random.choice(N, num, replace=False)
idx = torch.from_numpy(idx).long().cuda()
one_pc = torch.cat([
duplicated_pc, one_pc[idx]
], dim=0)
proc_pc[pc_idx] = one_pc
return proc_pc
def forward(self, x):
with torch.no_grad():
x = self.sor(x) # a list of pc
x = self.process_data(x) # to batch input
x = self.pu_net(x) # [B, N * r, 3]
return x
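# Usage sketch appended for illustration; everything below is hypothetical
# (random weights, random input) and only shows the expected call pattern and
# output shape of DUPNet. It assumes a CUDA device is available and that the
# PU-Net point operations are compiled; real usage would load trained weights.
if __name__ == "__main__":
    model = DUPNet(sor_k=2, sor_alpha=1.1, npoint=1024, up_ratio=4).cuda().eval()
    points = torch.rand(8, 2048, 3).cuda()  # stand-in for a batch of attacked point clouds
    with torch.no_grad():
        upsampled = model(points)  # SOR filter -> pad/trim to npoint -> PU-Net upsample
    print(upsampled.shape)  # expected: [8, npoint * up_ratio, 3]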
|
from django.db import models
from ordered_model.models import OrderedModel
# Create your models here.
TRUNK_MAX_LENGTH = 100
class Trunk(models.Model):
name = models.CharField(blank=True, max_length=TRUNK_MAX_LENGTH)
sub_title = models.CharField(blank=True, max_length=TRUNK_MAX_LENGTH)
cols = models.IntegerField(default=4)
def __str__(self):
return self.name
class EyeKey(OrderedModel):
name = models.CharField(blank=True, max_length=TRUNK_MAX_LENGTH)
label = models.CharField(blank=True, max_length=3)
trunk = models.ForeignKey(Trunk, on_delete=models.CASCADE, null=True, related_name="eye_keys")
row = models.IntegerField(default=1)
col = models.IntegerField(default=1)
col_size = models.IntegerField(default=1)
order_with_respect_to = 'trunk'
def __str__(self):
output = self.name
if self.trunk:
output += " | " + self.trunk.name
return output
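# Usage sketch (hypothetical objects, not part of the app): EyeKey rows are
# ordered per Trunk via django-ordered-model's ``order_with_respect_to``, so
# keys can be created and reordered relative to their trunk, e.g. from a shell
# or a data migration.
#
#   trunk = Trunk.objects.create(name="Left hand", sub_title="home row", cols=4)
#   key = EyeKey.objects.create(name="alpha", label="A", trunk=trunk, row=1, col=1)
#   key.bottom()                          # ordered-model helper: move to end of this trunk
#   trunk.eye_keys.order_by("order")      # keys in display order via the related_name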
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkg_resources
import pytest
import zope.interface
from gordon import exceptions
from gordon import interfaces
from gordon import plugins_loader
from gordon.metrics import ffwd
from tests.unit import conftest
#####
# Fixtures
#####
@pytest.fixture(scope='session')
def namespaced_config():
return {
'event_consumer': {'a_key': 'a_value', 'b_key': 'b_value'},
'event_consumer.plugin': {'a_key': 'another_value'},
'enricher': {},
'enricher.plugin': {'d_key': 'd_value'}
}
@pytest.fixture(scope='session')
def plugin_config():
return {
'xyz': {
'a_key': 'a_value',
'b_key': 'b_value',
},
'xyz.event_consumer': {
'a_key': 'another_value',
'b_key': 'b_value'
},
'xyz.enricher': {
'a_key': 'a_value',
'b_key': 'b_value',
'd_key': 'd_value',
},
'xyz.publisher': {
'a_key': 'a_value',
'b_key': 'b_value',
'c_key': 'c_value',
}
}
@pytest.fixture
def exp_inited_plugins(plugin_config, plugin_kwargs):
return [
conftest.EventConsumerStub(
plugin_config['xyz.event_consumer'], **plugin_kwargs),
conftest.EnricherStub(
plugin_config['xyz.enricher'], **plugin_kwargs),
conftest.PublisherStub(
plugin_config['xyz.publisher'], **plugin_kwargs)
]
@pytest.fixture
def mock_iter_entry_points(mocker, monkeypatch, installed_plugins):
mock_plugins = installed_plugins.values()
mock_iter_entry_points = mocker.MagicMock(pkg_resources.iter_entry_points)
mock_iter_entry_points.return_value = iter(mock_plugins)
monkeypatch.setattr(plugins_loader.pkg_resources, 'iter_entry_points',
mock_iter_entry_points)
def is_instance_of_stub(obj):
stubs = [
conftest.EventConsumerStub,
conftest.EnricherStub,
conftest.PublisherStub,
conftest.GenericStub
]
return any([isinstance(obj, stub) for stub in stubs])
#####
# The good stuff
#####
def test_init_plugins(installed_plugins, plugin_config, inited_plugins,
plugin_kwargs):
"""Plugins are initialized with their config."""
inited_names, inited_plugins, errors = plugins_loader._init_plugins(
conftest.REGISTERED_ACTIVE_PLUGINS, installed_plugins, plugin_config,
plugin_kwargs
)
assert sorted(conftest.REGISTERED_ACTIVE_PLUGINS) == sorted(inited_names)
for plugin_obj in inited_plugins:
assert is_instance_of_stub(plugin_obj)
assert any([p.config == plugin_obj.config for p in inited_plugins])
def test_init_plugins_exceptions(mocker, plugin_kwargs):
"""Non-callable plugin returns plugin-specific exceptions."""
name = 'B0rkedPlugin'
config = {'B0rkedPlugin': {'foo': 'bar'}}
plugin_mock = mocker.MagicMock(pkg_resources.EntryPoint, autospec=True)
plugin_mock.name = name
plugin_mock.load.return_value = 'not_a_class'
plugins = {name: plugin_mock}
inited_names, inited_plugins, errors = plugins_loader._init_plugins(
[name], plugins, config, plugin_kwargs)
assert 1 == len(errors)
def test_init_plugins_skipped(installed_plugins, plugin_config, caplog,
plugin_kwargs):
"""Skips plugins that are not configured."""
config = {'xyz.event_consumer': plugin_config['xyz.event_consumer']}
inited_names, inited_plugins, errors = plugins_loader._init_plugins(
conftest.REGISTERED_ACTIVE_PLUGINS, installed_plugins, config,
plugin_kwargs
)
assert 1 == len(inited_plugins) == len(inited_names)
assert 2 == len(caplog.records)
def test_init_plugins_empty_config(installed_plugins, plugin_kwargs):
"""Loads plugin if mathcing config key exists with empty config."""
config = {name: {} for name in conftest.REGISTERED_ACTIVE_PLUGINS}
inited_names, inited_plugins, errors = plugins_loader._init_plugins(
conftest.REGISTERED_ACTIVE_PLUGINS, installed_plugins, config,
plugin_kwargs
)
assert 3 == len(inited_plugins) == len(inited_names)
for plugin_obj in inited_plugins:
assert {} == plugin_obj.config
def test_init_plugins_skip_inactive(installed_plugins, plugin_config,
plugin_kwargs):
"""Skips plugins that are not activated in core config."""
inited_names, inited_plugins, errors = plugins_loader._init_plugins(
[conftest.REGISTERED_ACTIVE_PLUGINS[0]], installed_plugins,
plugin_config, plugin_kwargs)
assert 1 == len(inited_plugins) == len(inited_names)
exp = plugin_config.get(conftest.REGISTERED_ACTIVE_PLUGINS[0])
assert exp == inited_plugins[0].config
@pytest.mark.parametrize('namespace,exp_config', (
('event_consumer', {'a_key': 'a_value', 'b_key': 'b_value'}),
('event_consumer.plugin', {'a_key': 'another_value', 'b_key': 'b_value'}),
('enricher', {}),
('enricher.plugin', {'d_key': 'd_value'})
))
def test_merge_config(namespace, exp_config, namespaced_config):
"""Namespaced config for a plugin also has parent/global config."""
ret_config = plugins_loader._merge_config(namespaced_config, namespace)
assert exp_config == ret_config
@pytest.mark.parametrize('namespace,exp_config', (
('xyz', {'a_key': 'a_value', 'b_key': 'b_value'}),
('xyz.event_consumer', {'a_key': 'another_value'}),
('xyz.enricher', {'d_key': 'd_value'}),
))
def test_get_namespaced_config(namespace, exp_config, installed_plugins,
loaded_config):
"""Tease out config specific to a plugin with no parent config."""
all_plugins = installed_plugins.keys()
ret_namespace, ret_config = plugins_loader._get_namespaced_config(
loaded_config, namespace, all_plugins)
assert exp_config == ret_config
assert namespace == ret_namespace
def test_load_plugin_configs(installed_plugins, loaded_config, plugin_config):
"""Load plugin-specific config ignoring other plugins' configs."""
plugin_names = ['xyz'] + conftest.REGISTERED_ACTIVE_PLUGINS
parsed_config = plugins_loader._load_plugin_configs(
plugin_names, loaded_config)
for name in conftest.REGISTERED_ACTIVE_PLUGINS:
assert plugin_config[name] == parsed_config[name]
def test_get_plugin_config_keys(installed_plugins):
"""Entry point keys for plugins are parsed to config keys."""
config_keys = plugins_loader._get_plugin_config_keys(installed_plugins)
expected = ['xyz'] + conftest.REGISTERED_PLUGINS
assert sorted(expected) == sorted(config_keys)
def test_get_activated_plugins(loaded_config, installed_plugins):
"""Assert activated plugins are installed."""
active = plugins_loader._get_activated_plugins(
loaded_config, installed_plugins)
assert conftest.REGISTERED_ACTIVE_PLUGINS == active
def test_get_activated_plugins_raises(loaded_config, installed_plugins):
"""Raise when activated plugins are not installed."""
loaded_config['core']['plugins'].append('xyz.not_installed_plugin')
with pytest.raises(exceptions.LoadPluginError) as e:
plugins_loader._get_activated_plugins(loaded_config, installed_plugins)
e.match('Plugin "xyz.not_installed_plugin" not installed')
def test_gather_installed_plugins(mock_iter_entry_points, installed_plugins):
"""Gather entry points/plugins into a {name: entry point} format."""
gathered_plugins = plugins_loader._gather_installed_plugins()
assert sorted(installed_plugins) == sorted(gathered_plugins)
def test_load_plugins(mock_iter_entry_points, loaded_config, installed_plugins,
exp_inited_plugins, plugin_kwargs):
"""Plugins are loaded and instantiated with their config."""
inited_names, installed_plugins, errors, _ = plugins_loader.load_plugins(
loaded_config, plugin_kwargs)
assert 3 == len(inited_names) == len(installed_plugins)
for plugin_obj in installed_plugins:
assert is_instance_of_stub(plugin_obj)
assert any([p.config == plugin_obj.config for p in exp_inited_plugins])
def test_load_plugins_none_loaded(mocker, installed_plugins, plugin_kwargs):
"""Return empty list when no plugins are found."""
mock_iter_entry_points = mocker.MagicMock(pkg_resources.iter_entry_points)
mock_iter_entry_points.return_value = []
loaded_config = {'core': {}}
inited_names, installed_plugins, errors, _ = plugins_loader.load_plugins(
loaded_config, plugin_kwargs)
assert [] == installed_plugins == inited_names == errors
def test_load_plugins_exceptions(installed_plugins, loaded_config,
mock_iter_entry_points, plugin_exc_mock,
plugin_kwargs, mocker, monkeypatch):
"""Loading plugin exceptions are returned."""
inited_plugins_mock = mocker.MagicMock(
plugins_loader._init_plugins, autospec=True)
exc = [('bad.plugin', plugin_exc_mock)]
inited_plugins_mock.return_value = (
conftest.REGISTERED_PLUGINS, inited_plugins_mock, exc)
monkeypatch.setattr(plugins_loader, '_init_plugins', inited_plugins_mock)
inited_names, installed_plugins, errors, _ = plugins_loader.load_plugins(
loaded_config, plugin_kwargs)
assert 1 == len(errors)
@zope.interface.implementer(interfaces.IMetricRelay)
class MetricRelayStub:
def __init__(self, config):
pass
@pytest.fixture
def metrics_mock(mocker):
relay_mock = mocker.MagicMock(pkg_resources.EntryPoint)
relay_mock.name = 'mock-provider-name'
relay_mock.load.return_value = MetricRelayStub
return relay_mock
@pytest.fixture
def plugins_incl_metrics(mocker, monkeypatch, metrics_mock, installed_plugins):
installed_plugins[metrics_mock.name] = metrics_mock
mock_iter_entry_points = mocker.Mock(pkg_resources.iter_entry_points)
mock_iter_entry_points.return_value = iter(installed_plugins.values())
monkeypatch.setattr(plugins_loader.pkg_resources, 'iter_entry_points',
mock_iter_entry_points)
return installed_plugins
def test_load_plugins_with_metrics(plugins_incl_metrics, loaded_config,
exp_inited_plugins, plugin_kwargs,
metrics_mock):
"""Plugins are loaded and instantiated with their config and metrics."""
loaded_config['core'].update({'metrics': metrics_mock.name})
names, installed_plugins, errors, plugin_kw = plugins_loader.load_plugins(
loaded_config, plugin_kwargs)
# if metrics were included, len() would be 4
assert 3 == len(names) == len(installed_plugins)
for plugin_obj in installed_plugins:
assert not isinstance(plugin_obj, MetricRelayStub)
assert is_instance_of_stub(plugin_obj)
assert any([p.config == plugin_obj.config for p in exp_inited_plugins])
assert isinstance(plugin_kw['metrics'], MetricRelayStub)
def test_get_metrics_returns_ffwd(loaded_config, plugins_incl_metrics):
loaded_config['core'].update({'metrics': 'ffwd'})
actual = plugins_loader._get_metrics_plugin(
loaded_config, plugins_incl_metrics)
assert isinstance(actual, ffwd.SimpleFfwdRelay)
def test_get_metrics_returns_plugin(metrics_mock, plugins_incl_metrics):
"""MetricRelay should load if both implements interface and configured."""
config = {'core': {'metrics': 'mock-provider-name'}}
actual = plugins_loader._get_metrics_plugin(config, plugins_incl_metrics)
assert isinstance(actual, MetricRelayStub)
def test_get_metrics_not_installed_raises(installed_plugins):
"""Return None if config or name incorrect."""
config = {'core': {'metrics': 'non-installed-metrics-provider'}}
with pytest.raises(exceptions.LoadPluginError) as e:
plugins_loader._get_metrics_plugin(config, installed_plugins)
assert e.match('Metrics.*non-installed-metrics-provider.*not installed')
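# Usage sketch mirroring test_load_plugins above (illustration only, not an
# additional test; loaded_config and plugin_kwargs have the same shapes as the
# shared fixtures in tests.unit.conftest):
def _example_load_plugins(loaded_config, plugin_kwargs):
    names, plugins, errors, plugin_kw = plugins_loader.load_plugins(
        loaded_config, plugin_kwargs)
    assert not errors  # every activated, installed plugin initialised cleanly
    return dict(zip(names, plugins)), plugin_kw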
|